2024-12-10 14:54:56,181 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-10 14:54:56,209 main DEBUG Took 0.013938 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 14:54:56,210 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 14:54:56,210 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 14:54:56,212 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 14:54:56,213 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,251 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 14:54:56,279 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,282 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,283 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,283 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,284 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,285 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,285 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,286 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,286 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,287 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,288 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,288 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,289 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-10 14:54:56,289 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,290 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,290 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,291 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,291 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,292 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,292 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,293 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,294 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 14:54:56,294 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,295 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 14:54:56,298 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 14:54:56,300 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 14:54:56,304 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 14:54:56,304 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
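All of the LoggerConfig builders above, and the PatternLayout/HBaseTestAppender built just below, come from a properties-format Log4j 2 configuration (PropertiesConfiguration), with the internal status logger turned up to DEBUG, which is why every "main DEBUG" line appears at all. As a rough, illustrative sketch only (the actual log4j2.properties inside hbase-logging-2.7.0-SNAPSHOT-tests.jar is not reproduced here, and the real appender is the custom HBaseTestAppender plugin rather than the stock Console appender), a configuration that produces builder calls like these looks roughly like:

```properties
# Hypothetical sketch only -- not the log4j2.properties actually bundled in
# hbase-logging-2.7.0-SNAPSHOT-tests.jar. The real configuration uses the
# custom HBaseTestAppender plugin; a stock Console appender is shown here
# purely to keep the example self-contained.
status = debug

appender.console.type = Console
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

# Corresponds to LoggerConfig$RootLogger$Builder(levelAndRefs="INFO,Console")
rootLogger.level = INFO
rootLogger.appenderRef.console.ref = Console

# One logger.<id>.name / logger.<id>.level pair per LoggerConfig$Builder entry above
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
```

Each "main DEBUG" status line in this part of the log is Log4j's own StatusLogger reporting on that configuration build; it is separate from the application loggers being configured.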
2024-12-10 14:54:56,306 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 14:54:56,306 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 14:54:56,327 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 14:54:56,338 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 14:54:56,342 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 14:54:56,342 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 14:54:56,343 main DEBUG createAppenders(={Console}) 2024-12-10 14:54:56,345 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-10 14:54:56,345 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-10 14:54:56,345 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-10 14:54:56,348 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 14:54:56,348 main DEBUG OutputStream closed 2024-12-10 14:54:56,352 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 14:54:56,353 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 14:54:56,353 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-10 14:54:56,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 14:54:56,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 14:54:56,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 14:54:56,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 14:54:56,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 14:54:56,473 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 14:54:56,473 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 14:54:56,474 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 14:54:56,474 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 14:54:56,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 14:54:56,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 14:54:56,476 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 14:54:56,476 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 14:54:56,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 14:54:56,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 14:54:56,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 14:54:56,478 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 14:54:56,479 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 14:54:56,482 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 14:54:56,482 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-10 14:54:56,483 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 14:54:56,484 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-10T14:54:56,503 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-10 14:54:56,507 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 14:54:56,508 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10T14:54:56,872 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0 2024-12-10T14:54:56,873 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-10T14:54:56,925 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-10T14:54:57,177 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T14:54:57,179 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8, deleteOnExit=true 2024-12-10T14:54:57,179 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-10T14:54:57,180 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/test.cache.data in system properties and HBase conf 2024-12-10T14:54:57,181 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T14:54:57,181 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir in system properties and HBase conf 2024-12-10T14:54:57,182 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T14:54:57,183 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T14:54:57,183 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-10T14:54:57,298 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T14:54:57,306 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T14:54:57,314 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T14:54:57,317 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T14:54:57,319 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T14:54:57,320 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T14:54:57,321 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T14:54:57,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T14:54:57,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T14:54:57,326 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T14:54:57,326 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/nfs.dump.dir in system properties and HBase conf 2024-12-10T14:54:57,327 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir in system properties and HBase conf 2024-12-10T14:54:57,328 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T14:54:57,328 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T14:54:57,329 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T14:54:58,438 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T14:54:58,557 INFO [Time-limited test {}] log.Log(170): Logging initialized @3527ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T14:54:58,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:54:58,754 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:54:58,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:54:58,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:54:58,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T14:54:58,824 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:54:58,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:54:58,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T14:54:59,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7883a2cb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-45609-hadoop-hdfs-3_4_1-tests_jar-_-any-1340771815993441313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T14:54:59,095 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609} 2024-12-10T14:54:59,096 INFO [Time-limited test {}] server.Server(415): Started @4068ms 2024-12-10T14:54:59,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:54:59,567 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:54:59,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:54:59,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:54:59,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:54:59,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7622634b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:54:59,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5648e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T14:54:59,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a633356{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-44557-hadoop-hdfs-3_4_1-tests_jar-_-any-18444611518813451881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T14:54:59,701 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557} 2024-12-10T14:54:59,702 INFO [Time-limited test {}] server.Server(415): Started @4674ms 2024-12-10T14:54:59,779 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T14:54:59,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:54:59,930 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:54:59,937 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:54:59,937 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:54:59,937 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:54:59,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45f72ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:54:59,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a110049{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:00,132 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14090edc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-37783-hadoop-hdfs-3_4_1-tests_jar-_-any-11629086731278001682/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T14:55:00,133 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783} 2024-12-10T14:55:00,134 INFO [Time-limited test {}] server.Server(415): Started @5106ms 2024-12-10T14:55:00,140 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T14:55:00,205 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:00,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:55:00,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:55:00,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:55:00,230 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T14:55:00,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e1b48b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:55:00,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f4d5ab4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:00,355 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,355 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,356 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,359 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bdef31c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-41005-hadoop-hdfs-3_4_1-tests_jar-_-any-9292834270673826512/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T14:55:00,414 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005} 2024-12-10T14:55:00,415 INFO [Time-limited test {}] server.Server(415): Started @5387ms 2024-12-10T14:55:00,418 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T14:55:00,451 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T14:55:00,452 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T14:55:00,552 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf74467ebab302fe8 with lease ID 0x655c5d321cc42b4: Processing first storage report for DS-957b27d6-49d5-458e-948b-d34ad2b2482b from datanode DatanodeRegistration(127.0.0.1:34057, datanodeUuid=bd32721d-283c-428d-b13c-f1f8357b234b, infoPort=35659, infoSecurePort=0, ipcPort=39217, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,554 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf74467ebab302fe8 with lease ID 0x655c5d321cc42b4: from storage DS-957b27d6-49d5-458e-948b-d34ad2b2482b node DatanodeRegistration(127.0.0.1:34057, datanodeUuid=bd32721d-283c-428d-b13c-f1f8357b234b, infoPort=35659, infoSecurePort=0, ipcPort=39217, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7a80dff71b747f4 with lease ID 0x655c5d321cc42b5: Processing first storage report for DS-0960cec0-88f5-4837-9180-abd7da2449be from datanode DatanodeRegistration(127.0.0.1:39383, datanodeUuid=fd620eca-af69-4116-9d3b-064c4e513dae, infoPort=34921, infoSecurePort=0, ipcPort=36315, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7a80dff71b747f4 with lease ID 0x655c5d321cc42b5: from storage DS-0960cec0-88f5-4837-9180-abd7da2449be node DatanodeRegistration(127.0.0.1:39383, datanodeUuid=fd620eca-af69-4116-9d3b-064c4e513dae, infoPort=34921, infoSecurePort=0, ipcPort=36315, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf74467ebab302fe8 with lease ID 0x655c5d321cc42b4: Processing first storage report for DS-ba16d127-0f6d-4bcc-b4a4-9c24d1786cd6 from datanode DatanodeRegistration(127.0.0.1:34057, datanodeUuid=bd32721d-283c-428d-b13c-f1f8357b234b, infoPort=35659, infoSecurePort=0, ipcPort=39217, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf74467ebab302fe8 with lease ID 0x655c5d321cc42b4: from storage DS-ba16d127-0f6d-4bcc-b4a4-9c24d1786cd6 node DatanodeRegistration(127.0.0.1:34057, datanodeUuid=bd32721d-283c-428d-b13c-f1f8357b234b, infoPort=35659, infoSecurePort=0, ipcPort=39217, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 
0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7a80dff71b747f4 with lease ID 0x655c5d321cc42b5: Processing first storage report for DS-5dbf4329-f963-4d81-b1e9-ad989fc310d9 from datanode DatanodeRegistration(127.0.0.1:39383, datanodeUuid=fd620eca-af69-4116-9d3b-064c4e513dae, infoPort=34921, infoSecurePort=0, ipcPort=36315, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7a80dff71b747f4 with lease ID 0x655c5d321cc42b5: from storage DS-5dbf4329-f963-4d81-b1e9-ad989fc310d9 node DatanodeRegistration(127.0.0.1:39383, datanodeUuid=fd620eca-af69-4116-9d3b-064c4e513dae, infoPort=34921, infoSecurePort=0, ipcPort=36315, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,633 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,646 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143/current, will proceed with Du for space computation calculation, 2024-12-10T14:55:00,709 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T14:55:00,717 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a04822e5bacbeec with lease ID 0x655c5d321cc42b6: Processing first storage report for DS-3591a6d7-1349-4590-be89-c09cb9965731 from datanode DatanodeRegistration(127.0.0.1:39483, datanodeUuid=a7638ed4-3cb9-4cf4-ba7d-e2f79ee7a042, infoPort=39813, infoSecurePort=0, ipcPort=40961, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a04822e5bacbeec with lease ID 0x655c5d321cc42b6: from storage DS-3591a6d7-1349-4590-be89-c09cb9965731 node DatanodeRegistration(127.0.0.1:39483, datanodeUuid=a7638ed4-3cb9-4cf4-ba7d-e2f79ee7a042, infoPort=39813, infoSecurePort=0, ipcPort=40961, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a04822e5bacbeec with lease ID 0x655c5d321cc42b6: Processing first storage report for DS-35b77d2f-476a-4505-aacf-1a2a3d06c1a9 from datanode DatanodeRegistration(127.0.0.1:39483, datanodeUuid=a7638ed4-3cb9-4cf4-ba7d-e2f79ee7a042, infoPort=39813, infoSecurePort=0, ipcPort=40961, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143) 2024-12-10T14:55:00,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a04822e5bacbeec with lease ID 0x655c5d321cc42b6: from storage DS-35b77d2f-476a-4505-aacf-1a2a3d06c1a9 node DatanodeRegistration(127.0.0.1:39483, datanodeUuid=a7638ed4-3cb9-4cf4-ba7d-e2f79ee7a042, infoPort=39813, infoSecurePort=0, ipcPort=40961, storageInfo=lv=-57;cid=testClusterID;nsid=1957005942;c=1733842498143), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T14:55:00,932 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0 2024-12-10T14:55:01,060 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/zookeeper_0, clientPort=57176, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T14:55:01,077 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57176 2024-12-10T14:55:01,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
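By this point the test harness has brought up HDFS with three datanodes and a single-node MiniZooKeeperCluster on clientPort=57176, driven by the StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1} logged earlier; the HBase master and region servers start next. As a minimal sketch of how a test typically requests such a cluster (illustrative only, assuming the branch-2 HBaseTestingUtility/StartMiniClusterOption API; this is not code taken from TestExportSnapshot or TestSecureExportSnapshot):

```java
// Illustrative sketch of starting a mini cluster matching the options in this log.
// Real tests usually do this in a JUnit @BeforeClass method rather than main().
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster, as in the log
        .numRegionServers(3)  // three region servers
        .numDataNodes(3)      // three HDFS datanodes
        .numZkServers(1)      // single-node MiniZooKeeperCluster
        .build();
    util.startMiniCluster(option);  // brings up DFS, ZooKeeper, master and region servers
    try {
      // ... run test logic against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();   // tear everything down at the end of the test
    }
  }
}
```

The startMiniCluster() call is what produces the "STARTING DFS", MiniZooKeeperCluster and master/regionserver startup entries that follow in this log.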
2024-12-10T14:55:01,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:01,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741825_1001 (size=7) 2024-12-10T14:55:01,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741825_1001 (size=7) 2024-12-10T14:55:01,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741825_1001 (size=7) 2024-12-10T14:55:01,880 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef with version=8 2024-12-10T14:55:01,880 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/hbase-staging 2024-12-10T14:55:02,054 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-10T14:55:02,343 INFO [Time-limited test {}] client.ConnectionUtils(129): master/db1d50717577:0 server-side Connection retries=45 2024-12-10T14:55:02,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,365 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T14:55:02,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T14:55:02,533 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T14:55:02,597 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T14:55:02,606 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T14:55:02,610 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T14:55:02,646 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18499 (auto-detected) 2024-12-10T14:55:02,647 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 
02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T14:55:02,668 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36533 2024-12-10T14:55:02,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,700 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36533 connecting to ZooKeeper ensemble=127.0.0.1:57176 2024-12-10T14:55:02,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365330x0, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T14:55:02,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36533-0x1019cdee8310000 connected 2024-12-10T14:55:02,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T14:55:02,783 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:55:02,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T14:55:02,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36533 2024-12-10T14:55:02,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36533 2024-12-10T14:55:02,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36533 2024-12-10T14:55:02,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36533 2024-12-10T14:55:02,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36533 2024-12-10T14:55:02,822 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef, hbase.cluster.distributed=false 2024-12-10T14:55:02,888 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db1d50717577:0 server-side Connection retries=45 2024-12-10T14:55:02,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,889 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-12-10T14:55:02,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T14:55:02,893 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T14:55:02,896 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T14:55:02,897 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41775 2024-12-10T14:55:02,899 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T14:55:02,907 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T14:55:02,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,916 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41775 connecting to ZooKeeper ensemble=127.0.0.1:57176 2024-12-10T14:55:02,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417750x0, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T14:55:02,921 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417750x0, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T14:55:02,921 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41775-0x1019cdee8310001 connected 2024-12-10T14:55:02,923 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:55:02,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T14:55:02,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41775 2024-12-10T14:55:02,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41775 2024-12-10T14:55:02,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41775 2024-12-10T14:55:02,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41775 
2024-12-10T14:55:02,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41775 2024-12-10T14:55:02,957 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db1d50717577:0 server-side Connection retries=45 2024-12-10T14:55:02,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,958 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T14:55:02,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:02,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T14:55:02,958 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T14:55:02,959 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T14:55:02,960 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37707 2024-12-10T14:55:02,961 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T14:55:02,964 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T14:55:02,965 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:02,974 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37707 connecting to ZooKeeper ensemble=127.0.0.1:57176 2024-12-10T14:55:02,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377070x0, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T14:55:02,984 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377070x0, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T14:55:02,984 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37707-0x1019cdee8310002 connected 2024-12-10T14:55:02,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that 
does not yet exist, /hbase/running 2024-12-10T14:55:02,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T14:55:02,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37707 2024-12-10T14:55:02,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37707 2024-12-10T14:55:02,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37707 2024-12-10T14:55:02,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37707 2024-12-10T14:55:02,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37707 2024-12-10T14:55:03,016 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db1d50717577:0 server-side Connection retries=45 2024-12-10T14:55:03,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:03,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:03,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T14:55:03,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T14:55:03,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T14:55:03,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T14:55:03,018 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T14:55:03,019 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45255 2024-12-10T14:55:03,020 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T14:55:03,024 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T14:55:03,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:03,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-12-10T14:55:03,036 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45255 connecting to ZooKeeper ensemble=127.0.0.1:57176 2024-12-10T14:55:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452550x0, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T14:55:03,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452550x0, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T14:55:03,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452550x0, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:55:03,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452550x0, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T14:55:03,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45255-0x1019cdee8310003 connected 2024-12-10T14:55:03,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45255 2024-12-10T14:55:03,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45255 2024-12-10T14:55:03,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45255 2024-12-10T14:55:03,051 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45255 2024-12-10T14:55:03,052 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45255 2024-12-10T14:55:03,055 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/db1d50717577,36533,1733842502047 2024-12-10T14:55:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,065 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db1d50717577,36533,1733842502047 2024-12-10T14:55:03,073 DEBUG [M:0;db1d50717577:36533 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db1d50717577:36533 2024-12-10T14:55:03,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T14:55:03,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T14:55:03,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T14:55:03,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T14:55:03,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,098 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T14:55:03,099 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T14:55:03,099 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db1d50717577,36533,1733842502047 from backup master directory 2024-12-10T14:55:03,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db1d50717577,36533,1733842502047 2024-12-10T14:55:03,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-10T14:55:03,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T14:55:03,106 WARN [master/db1d50717577:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T14:55:03,106 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db1d50717577,36533,1733842502047 2024-12-10T14:55:03,108 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T14:55:03,112 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T14:55:03,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741826_1002 (size=42) 2024-12-10T14:55:03,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741826_1002 (size=42) 2024-12-10T14:55:03,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741826_1002 (size=42) 2024-12-10T14:55:03,213 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/hbase.id with ID: 8fc014bb-8322-4c78-ba78-18838b166a53 2024-12-10T14:55:03,274 INFO [master/db1d50717577:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T14:55:03,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:03,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741827_1003 (size=196) 
2024-12-10T14:55:03,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741827_1003 (size=196)
2024-12-10T14:55:03,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741827_1003 (size=196)
2024-12-10T14:55:03,757 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-10T14:55:03,759 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-10T14:55:03,780 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:55:03,786 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-10T14:55:03,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741828_1004 (size=1189)
2024-12-10T14:55:03,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741828_1004 (size=1189)
2024-12-10T14:55:03,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741828_1004 (size=1189)
2024-12-10T14:55:03,865 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/data/master/store
2024-12-10T14:55:03,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483
is added to blk_1073741829_1005 (size=34) 2024-12-10T14:55:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741829_1005 (size=34) 2024-12-10T14:55:03,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741829_1005 (size=34) 2024-12-10T14:55:03,903 INFO [master/db1d50717577:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-10T14:55:03,903 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:03,904 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T14:55:03,905 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:55:03,905 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:55:03,905 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T14:55:03,905 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:55:03,905 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T14:55:03,905 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T14:55:03,909 WARN [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/data/master/store/.initializing 2024-12-10T14:55:03,909 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047 2024-12-10T14:55:03,925 INFO [master/db1d50717577:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T14:55:03,944 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C36533%2C1733842502047, suffix=, logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047, archiveDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/oldWALs, maxLogs=10 2024-12-10T14:55:03,973 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951, exclude list is [], retry=0 2024-12-10T14:55:03,996 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34057,DS-957b27d6-49d5-458e-948b-d34ad2b2482b,DISK] 2024-12-10T14:55:03,996 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39483,DS-3591a6d7-1349-4590-be89-c09cb9965731,DISK] 2024-12-10T14:55:03,996 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39383,DS-0960cec0-88f5-4837-9180-abd7da2449be,DISK] 2024-12-10T14:55:04,000 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
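
The WAL lines above show the master's local-store WAL being created through AsyncFSWALProvider with blocksize=256 MB and rollsize=128 MB. A rough sketch of the standard configuration keys that usually drive those values follows; the keys are the common hbase-site.xml ones, but the wiring shown here (and the class name) is an assumption for illustration, not a dump of this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Select the async fan-out WAL implementation seen in the log
    // ("Instantiating WALProvider of type ... AsyncFSWALProvider").
    conf.set("hbase.wal.provider", "asyncfs");

    // WAL block size on HDFS; 256 MB matches "blocksize=256 MB" above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);

    // The WAL is rolled at blocksize * multiplier; 0.5 would give the logged rollsize=128 MB.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    System.out.println(conf.get("hbase.wal.provider"));
  }
}
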
2024-12-10T14:55:04,054 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 2024-12-10T14:55:04,055 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34921:34921),(127.0.0.1/127.0.0.1:35659:35659),(127.0.0.1/127.0.0.1:39813:39813)] 2024-12-10T14:55:04,056 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:55:04,057 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:04,061 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,062 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T14:55:04,141 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:04,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T14:55:04,149 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:04,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T14:55:04,156 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:04,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T14:55:04,161 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:04,166 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,167 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,178 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
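
The CompactionConfiguration entries above list the effective per-family compaction settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, major period 604800000 with jitter 0.5). As an illustrative mapping only, those numbers correspond to the standard keys sketched below; the class name is hypothetical and the values are simply copied from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum/maximum number of store files per minor compaction
    // (minFilesToCompact:3, maxFilesToCompact:10 in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Selection ratios ("ratio 1.200000; off-peak ratio 5.000000").
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // Major compaction every 7 days with 50% jitter
    // ("major period 604800000, major jitter 0.500000").
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}
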
2024-12-10T14:55:04,182 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:55:04,187 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:04,189 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73271010, jitterRate=0.09182313084602356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T14:55:04,195 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T14:55:04,196 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T14:55:04,239 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7393eea5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:04,290 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-10T14:55:04,306 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T14:55:04,306 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T14:55:04,309 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T14:55:04,317 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 8 msec 2024-12-10T14:55:04,323 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-10T14:55:04,323 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T14:55:04,355 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-10T14:55:04,370 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T14:55:04,373 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-10T14:55:04,376 INFO [master/db1d50717577:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T14:55:04,378 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T14:55:04,379 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-10T14:55:04,382 INFO [master/db1d50717577:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T14:55:04,390 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T14:55:04,393 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-10T14:55:04,394 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T14:55:04,395 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T14:55:04,405 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T14:55:04,407 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T14:55:04,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:55:04,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:55:04,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,417 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=db1d50717577,36533,1733842502047, sessionid=0x1019cdee8310000, setting cluster-up flag (Was=false) 2024-12-10T14:55:04,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,460 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T14:55:04,464 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db1d50717577,36533,1733842502047 2024-12-10T14:55:04,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:04,483 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T14:55:04,485 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db1d50717577,36533,1733842502047 2024-12-10T14:55:04,578 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db1d50717577:41775 2024-12-10T14:55:04,588 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;db1d50717577:37707 2024-12-10T14:55:04,590 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1008): ClusterId : 8fc014bb-8322-4c78-ba78-18838b166a53 2024-12-10T14:55:04,592 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1008): ClusterId : 8fc014bb-8322-4c78-ba78-18838b166a53 2024-12-10T14:55:04,595 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T14:55:04,598 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T14:55:04,598 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;db1d50717577:45255 2024-12-10T14:55:04,601 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1008): ClusterId : 8fc014bb-8322-4c78-ba78-18838b166a53 2024-12-10T14:55:04,603 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T14:55:04,604 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T14:55:04,604 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T14:55:04,609 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T14:55:04,610 DEBUG [RS:0;db1d50717577:41775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@532ce98f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:04,612 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T14:55:04,612 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T14:55:04,616 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T14:55:04,616 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T14:55:04,616 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T14:55:04,617 DEBUG [RS:1;db1d50717577:37707 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4013ffce, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:04,619 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T14:55:04,620 DEBUG [RS:2;db1d50717577:45255 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e1e52b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:04,625 DEBUG [RS:0;db1d50717577:41775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f1ac699, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:55:04,629 DEBUG [RS:1;db1d50717577:37707 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18148ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:55:04,629 INFO [RS:0;db1d50717577:41775 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T14:55:04,629 INFO [RS:1;db1d50717577:37707 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T14:55:04,629 INFO [RS:0;db1d50717577:41775 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T14:55:04,629 INFO [RS:1;db1d50717577:37707 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T14:55:04,635 DEBUG [RS:2;db1d50717577:45255 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e8ec0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:55:04,635 INFO [RS:2;db1d50717577:45255 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T14:55:04,635 INFO [RS:2;db1d50717577:45255 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T14:55:04,642 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-10T14:55:04,642 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-10T14:55:04,644 INFO [master/db1d50717577:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:55:04,644 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-10T14:55:04,644 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-10T14:55:04,644 INFO [master/db1d50717577:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-10T14:55:04,644 INFO [RS:0;db1d50717577:41775 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:04,644 INFO [RS:1;db1d50717577:37707 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:04,645 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-10T14:55:04,645 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-10T14:55:04,644 INFO [RS:2;db1d50717577:45255 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:04,645 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-10T14:55:04,648 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:41775, startcode=1733842502887 2024-12-10T14:55:04,648 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:37707, startcode=1733842502956 2024-12-10T14:55:04,650 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:45255, startcode=1733842503015 2024-12-10T14:55:04,667 DEBUG [RS:1;db1d50717577:37707 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:04,667 DEBUG [RS:2;db1d50717577:45255 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:04,667 DEBUG [RS:0;db1d50717577:41775 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:04,721 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39769, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:04,720 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33843, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:04,720 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33481, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:04,735 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 
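
The coprocessor lines above show org.apache.hadoop.hbase.security.access.AccessController loaded as a system coprocessor on the master and on each region server (priority 536870911). In a regular deployment that is typically wired through the configuration keys sketched below; this test harness injects it via its own utilities (note the SecureTestUtil$MasterSyncObserver entry), so treat the snippet and its class name as a generic illustration rather than this test's setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CoprocessorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // System coprocessors are listed per host type; AccessController is the
    // one reported as loaded in the log above.
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");

    // Authorization must also be switched on for the AccessController to enforce anything.
    conf.setBoolean("hbase.security.authorization", true);

    System.out.println(conf.get("hbase.coprocessor.master.classes"));
  }
}
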
2024-12-10T14:55:04,741 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:55:04,745 INFO [master/db1d50717577:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-10T14:55:04,748 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:55:04,749 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:55:04,750 INFO [master/db1d50717577:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-10T14:55:04,765 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db1d50717577,36533,1733842502047 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-10T14:55:04,772 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5
2024-12-10T14:55:04,772 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5
2024-12-10T14:55:04,773 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5
2024-12-10T14:55:04,773 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5
2024-12-10T14:55:04,773 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db1d50717577:0, corePoolSize=10, maxPoolSize=10
2024-12-10T14:55:04,773 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db1d50717577:0, corePoolSize=1, maxPoolSize=1
2024-12-10T14:55:04,773 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db1d50717577:0, corePoolSize=2, maxPoolSize=2
2024-12-10T14:55:04,774 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db1d50717577:0, corePoolSize=1, maxPoolSize=1
2024-12-10T14:55:04,778 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(3097): Master is not running yet
2024-12-10T14:55:04,778 DEBUG [RS:2;db1d50717577:45255 {}]
regionserver.HRegionServer(3097): Master is not running yet 2024-12-10T14:55:04,778 WARN [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T14:55:04,778 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-10T14:55:04,778 WARN [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T14:55:04,778 WARN [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T14:55:04,791 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T14:55:04,792 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-10T14:55:04,793 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733842534793 2024-12-10T14:55:04,795 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T14:55:04,796 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T14:55:04,797 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,798 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T14:55:04,801 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T14:55:04,802 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T14:55:04,802 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T14:55:04,803 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T14:55:04,808 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:04,821 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T14:55:04,823 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T14:55:04,823 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T14:55:04,833 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T14:55:04,833 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T14:55:04,844 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733842504835,5,FailOnTimeoutGroup] 2024-12-10T14:55:04,846 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733842504845,5,FailOnTimeoutGroup] 2024-12-10T14:55:04,847 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:04,847 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T14:55:04,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:04,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
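The repeated ServerNotRunningYetException stack traces and the "reportForDuty failed; sleeping 100 ms and then retrying" warnings above come from a plain retry loop: each region server keeps calling the master's regionServerStartup RPC and backs off briefly while the master is still initializing. A minimal, self-contained sketch of that pattern follows; the Callable stand-in, the IllegalStateException and the fixed 100 ms sleep are illustrative only, not the actual HRegionServer code.

    import java.util.concurrent.Callable;

    /** Simplified sketch of a report-for-duty retry loop with a fixed sleep between attempts. */
    public final class ReportForDutyRetry {

        /** Keeps invoking the startup call until it succeeds; sleeps between failed attempts. */
        static <T> T retryUntilMasterIsUp(Callable<T> startupCall, long sleepMillis) throws InterruptedException {
            while (true) {
                try {
                    return startupCall.call();            // e.g. the regionServerStartup RPC
                } catch (Exception serverNotRunningYet) { // master not ready yet: wait and retry
                    Thread.sleep(sleepMillis);
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            int[] attempts = {0};
            // Hypothetical startup call that fails twice before "the master" is ready.
            String ack = retryUntilMasterIsUp(() -> {
                if (attempts[0]++ < 2) throw new IllegalStateException("Server is not running yet");
                return "registered";
            }, 100L); // 100 ms, matching the sleep reported in the log
            System.out.println(ack + " after " + attempts[0] + " attempts");
        }
    }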
2024-12-10T14:55:04,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741831_1007 (size=1039) 2024-12-10T14:55:04,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741831_1007 (size=1039) 2024-12-10T14:55:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741831_1007 (size=1039) 2024-12-10T14:55:04,872 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-10T14:55:04,872 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:04,880 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:37707, startcode=1733842502956 2024-12-10T14:55:04,880 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:45255, startcode=1733842503015 2024-12-10T14:55:04,880 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,36533,1733842502047 with isa=db1d50717577/172.17.0.2:41775, startcode=1733842502887 2024-12-10T14:55:04,881 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db1d50717577,37707,1733842502956 2024-12-10T14:55:04,884 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(486): Registering regionserver=db1d50717577,37707,1733842502956 2024-12-10T14:55:04,896 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db1d50717577,41775,1733842502887 2024-12-10T14:55:04,896 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(486): Registering regionserver=db1d50717577,41775,1733842502887 2024-12-10T14:55:04,897 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:04,897 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36449 2024-12-10T14:55:04,897 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T14:55:04,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T14:55:04,903 DEBUG [RS:1;db1d50717577:37707 {}] zookeeper.ZKUtil(111): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db1d50717577,37707,1733842502956 2024-12-10T14:55:04,903 WARN [RS:1;db1d50717577:37707 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T14:55:04,903 INFO [RS:1;db1d50717577:37707 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:55:04,903 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956 2024-12-10T14:55:04,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db1d50717577,45255,1733842503015 2024-12-10T14:55:04,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] master.ServerManager(486): Registering regionserver=db1d50717577,45255,1733842503015 2024-12-10T14:55:04,906 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:04,907 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36449 2024-12-10T14:55:04,907 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T14:55:04,913 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:04,913 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36449 2024-12-10T14:55:04,913 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T14:55:04,913 DEBUG [RS:0;db1d50717577:41775 {}] zookeeper.ZKUtil(111): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db1d50717577,41775,1733842502887 2024-12-10T14:55:04,913 WARN [RS:0;db1d50717577:41775 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
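The ZKWatcher and ZKUtil entries above (NodeChildrenChanged on /hbase/rs, watcher set on /hbase/rs/<server>) and the RegionServerTracker entries just below show the registration handshake: each region server creates an ephemeral znode under /hbase/rs, and the master watches that parent for membership changes. Below is a minimal sketch of the same pattern with the plain ZooKeeper client, assuming /hbase/rs already exists; the connect string, session timeout and server name are placeholders, and error handling is omitted.

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.ZooKeeper;

    /** Sketch: register an ephemeral member under /hbase/rs and watch the parent for changes. */
    public final class RsZnodeRegistrationSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder quorum address (the quorum in this log is 127.0.0.1:57176).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

            // Region server side: the ephemeral node disappears automatically if the session dies.
            String rsNode = "/hbase/rs/example-host,16020," + System.currentTimeMillis();
            zk.create(rsNode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Master side: watch the parent; a NodeChildrenChanged event fires when members come or go.
            List<String> liveServers = zk.getChildren("/hbase/rs",
                event -> System.out.println("membership changed: " + event));
            System.out.println("live region servers: " + liveServers);

            zk.close();
        }
    }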
2024-12-10T14:55:04,914 INFO [RS:0;db1d50717577:41775 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:55:04,914 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,41775,1733842502887 2024-12-10T14:55:04,921 DEBUG [RS:2;db1d50717577:45255 {}] zookeeper.ZKUtil(111): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db1d50717577,45255,1733842503015 2024-12-10T14:55:04,921 WARN [RS:2;db1d50717577:45255 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T14:55:04,921 INFO [RS:2;db1d50717577:45255 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:55:04,922 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,45255,1733842503015 2024-12-10T14:55:04,933 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db1d50717577,45255,1733842503015] 2024-12-10T14:55:04,933 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db1d50717577,41775,1733842502887] 2024-12-10T14:55:04,933 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db1d50717577,37707,1733842502956] 2024-12-10T14:55:04,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741832_1008 (size=32) 2024-12-10T14:55:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741832_1008 (size=32) 2024-12-10T14:55:04,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741832_1008 (size=32) 2024-12-10T14:55:04,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:04,991 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T14:55:04,991 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T14:55:04,991 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T14:55:04,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T14:55:04,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T14:55:04,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:04,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:04,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T14:55:05,002 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T14:55:05,002 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:05,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:05,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T14:55:05,005 INFO [RS:2;db1d50717577:45255 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T14:55:05,006 INFO [RS:0;db1d50717577:41775 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T14:55:05,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T14:55:05,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:05,007 INFO [RS:1;db1d50717577:37707 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T14:55:05,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:05,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740 2024-12-10T14:55:05,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740 2024-12-10T14:55:05,013 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T14:55:05,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T14:55:05,020 INFO [RS:2;db1d50717577:45255 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T14:55:05,021 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:05,022 INFO [RS:0;db1d50717577:41775 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T14:55:05,023 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62790388, jitterRate=-0.06435030698776245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T14:55:05,024 INFO [RS:2;db1d50717577:45255 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T14:55:05,024 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
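The split policy printed just above reports desiredMaxFileSize=62790388 next to jitterRate=-0.06435030698776245. Those two numbers are consistent with a base region max file size of 64 MB (67108864 bytes) and a multiplicative jitter, i.e. 67108864 × (1 − 0.06435030698776245) ≈ 62790388; both the 64 MB base and the exact rounding are assumptions read off these values rather than taken from the HBase source. The tiny check below just reproduces the arithmetic.

    /** Arithmetic check: the logged desiredMaxFileSize matches a 64 MB base with the logged jitter applied. */
    public final class SplitSizeJitterCheck {
        public static void main(String[] args) {
            long baseMaxFileSize = 64L * 1024 * 1024;    // assumed base of 67108864 bytes for this test config
            double jitterRate = -0.06435030698776245;    // value printed in the log
            long desired = baseMaxFileSize + Math.round(baseMaxFileSize * jitterRate);
            System.out.println(desired);                 // 62790388, the desiredMaxFileSize in the log
        }
    }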
2024-12-10T14:55:05,024 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T14:55:05,025 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T14:55:05,025 INFO [RS:0;db1d50717577:41775 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T14:55:05,025 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T14:55:05,025 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,025 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T14:55:05,025 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T14:55:05,025 INFO [RS:1;db1d50717577:37707 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T14:55:05,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T14:55:05,026 INFO [RS:1;db1d50717577:37707 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T14:55:05,026 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,027 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T14:55:05,027 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T14:55:05,029 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T14:55:05,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T14:55:05,030 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T14:55:05,033 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T14:55:05,033 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-10T14:55:05,036 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,036 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,036 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
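The CompactionConfiguration entries a little further up (ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10) drive store file selection. As a rough illustration of the ratio idea only: an older file stays a candidate while it is no larger than ratio times the combined size of the files newer than it, which keeps one oversized file from dragging a handful of small flushes into the same compaction. The sketch below is a simplified stand-in for ExploringCompactionPolicy, with made-up file sizes.

    import java.util.List;

    /** Simplified ratio-based minor-compaction selection (illustration, not the HBase policy). */
    public final class RatioSelectionSketch {

        /** fileSizes is ordered oldest-first; returns the files kept as compaction candidates. */
        static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
            // Drop oversized old files from the front: a file is skipped while it is larger than
            // ratio x the combined size of everything newer than it.
            int start = 0;
            while (start < fileSizes.size()) {
                long newerTotal = 0;
                for (int j = start + 1; j < fileSizes.size(); j++) {
                    newerTotal += fileSizes.get(j);
                }
                if (fileSizes.get(start) <= ratio * newerTotal) {
                    break;
                }
                start++;
            }
            List<Long> candidates = fileSizes.subList(start, fileSizes.size());
            if (candidates.size() < minFiles) {
                return List.of();                       // not enough files to be worth compacting
            }
            return candidates.subList(0, Math.min(candidates.size(), maxFiles));
        }

        public static void main(String[] args) {
            // Made-up sizes in MB: one large, already-compacted file plus several small flushes.
            List<Long> sizes = List.of(900L, 12L, 10L, 8L, 7L);
            System.out.println(select(sizes, 1.2, 3, 10)); // [12, 10, 8, 7]: the 900 MB file is left out
        }
    }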
2024-12-10T14:55:05,037 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,037 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db1d50717577:0, corePoolSize=2, maxPoolSize=2 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db1d50717577:0, corePoolSize=2, maxPoolSize=2 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db1d50717577:0, 
corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,038 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,038 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:1;db1d50717577:37707 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,039 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:0;db1d50717577:41775 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db1d50717577:0, corePoolSize=2, maxPoolSize=2 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,039 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,040 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,040 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,040 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:55:05,040 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,040 DEBUG [RS:2;db1d50717577:45255 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:55:05,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T14:55:05,050 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,050 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,41775,1733842502887-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,45255,1733842503015-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T14:55:05,051 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,052 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
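The executor.ExecutorService entries above show each region server creating one small, bounded thread pool per operation type (open region, close region, log replay, snapshot, flush, and so on), each with its own corePoolSize/maxPoolSize. Below is a minimal sketch of that layout with java.util.concurrent; the pool names and sizes are copied from the log, but the wiring is illustrative rather than HBase's own ExecutorService class.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    /** One bounded, named pool per operation type, mirroring the corePoolSize/maxPoolSize pairs in the log. */
    public final class PerOperationPools {
        public static void main(String[] args) {
            Map<String, Integer> poolSizes = Map.of(
                "RS_OPEN_REGION", 1,
                "RS_CLOSE_REGION", 1,
                "RS_LOG_REPLAY_OPS", 2,
                "RS_SNAPSHOT_OPERATIONS", 3,
                "RS_FLUSH_OPERATIONS", 3);

            // Fixed-size pools (corePoolSize == maxPoolSize), so each operation type is throttled independently.
            Map<String, ExecutorService> executors = new HashMap<>();
            poolSizes.forEach((name, size) -> executors.put(name, Executors.newFixedThreadPool(size)));

            executors.get("RS_OPEN_REGION").submit(() -> System.out.println("open-region work runs here"));
            executors.values().forEach(ExecutorService::shutdown);
        }
    }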
2024-12-10T14:55:05,052 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,37707,1733842502956-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T14:55:05,055 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T14:55:05,059 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T14:55:05,078 INFO [RS:0;db1d50717577:41775 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T14:55:05,079 INFO [RS:2;db1d50717577:45255 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T14:55:05,081 INFO [RS:1;db1d50717577:37707 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T14:55:05,082 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,37707,1733842502956-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,083 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,41775,1733842502887-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,090 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,45255,1733842503015-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,124 INFO [RS:1;db1d50717577:37707 {}] regionserver.Replication(204): db1d50717577,37707,1733842502956 started 2024-12-10T14:55:05,125 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1767): Serving as db1d50717577,37707,1733842502956, RpcServer on db1d50717577/172.17.0.2:37707, sessionid=0x1019cdee8310002 2024-12-10T14:55:05,126 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T14:55:05,126 DEBUG [RS:1;db1d50717577:37707 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db1d50717577,37707,1733842502956 2024-12-10T14:55:05,126 DEBUG [RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,37707,1733842502956' 2024-12-10T14:55:05,126 DEBUG [RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T14:55:05,127 DEBUG [RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T14:55:05,132 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T14:55:05,132 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T14:55:05,132 DEBUG [RS:1;db1d50717577:37707 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db1d50717577,37707,1733842502956 2024-12-10T14:55:05,132 DEBUG [RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,37707,1733842502956' 2024-12-10T14:55:05,132 DEBUG 
[RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T14:55:05,133 DEBUG [RS:1;db1d50717577:37707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T14:55:05,136 INFO [RS:0;db1d50717577:41775 {}] regionserver.Replication(204): db1d50717577,41775,1733842502887 started 2024-12-10T14:55:05,137 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1767): Serving as db1d50717577,41775,1733842502887, RpcServer on db1d50717577/172.17.0.2:41775, sessionid=0x1019cdee8310001 2024-12-10T14:55:05,137 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T14:55:05,137 DEBUG [RS:0;db1d50717577:41775 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db1d50717577,41775,1733842502887 2024-12-10T14:55:05,137 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,41775,1733842502887' 2024-12-10T14:55:05,137 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T14:55:05,137 DEBUG [RS:1;db1d50717577:37707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T14:55:05,137 INFO [RS:1;db1d50717577:37707 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T14:55:05,138 INFO [RS:1;db1d50717577:37707 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T14:55:05,138 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T14:55:05,139 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T14:55:05,139 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T14:55:05,139 DEBUG [RS:0;db1d50717577:41775 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db1d50717577,41775,1733842502887 2024-12-10T14:55:05,139 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,41775,1733842502887' 2024-12-10T14:55:05,139 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T14:55:05,140 DEBUG [RS:0;db1d50717577:41775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T14:55:05,140 DEBUG [RS:0;db1d50717577:41775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T14:55:05,140 INFO [RS:0;db1d50717577:41775 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T14:55:05,140 INFO [RS:0;db1d50717577:41775 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
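Most of the ChoreService entries above share the same shape: a named chore runs at a fixed period in a given unit (CompactionChecker every 1000 ms, nonceCleaner every 360000 ms, MobFileCleanerChore every 86400 s, and so on). Below is a minimal sketch of that periodic pattern, using a plain ScheduledExecutorService as a stand-in for HBase's ChoreService; the chore body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    /** Stand-in for a chore service: run a named task at a fixed period until shut down. */
    public final class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

            // Same shape as "name=CompactionChecker, period=1000, unit=MILLISECONDS" in the log.
            chores.scheduleAtFixedRate(
                () -> System.out.println("CompactionChecker tick"),  // placeholder chore body
                0, 1000, TimeUnit.MILLISECONDS);

            TimeUnit.SECONDS.sleep(3);   // let a few ticks run, then stop
            chores.shutdownNow();
        }
    }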
2024-12-10T14:55:05,144 INFO [RS:2;db1d50717577:45255 {}] regionserver.Replication(204): db1d50717577,45255,1733842503015 started 2024-12-10T14:55:05,144 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1767): Serving as db1d50717577,45255,1733842503015, RpcServer on db1d50717577/172.17.0.2:45255, sessionid=0x1019cdee8310003 2024-12-10T14:55:05,145 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T14:55:05,145 DEBUG [RS:2;db1d50717577:45255 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db1d50717577,45255,1733842503015 2024-12-10T14:55:05,145 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,45255,1733842503015' 2024-12-10T14:55:05,145 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T14:55:05,146 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T14:55:05,148 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T14:55:05,148 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T14:55:05,148 DEBUG [RS:2;db1d50717577:45255 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db1d50717577,45255,1733842503015 2024-12-10T14:55:05,148 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,45255,1733842503015' 2024-12-10T14:55:05,148 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T14:55:05,149 DEBUG [RS:2;db1d50717577:45255 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T14:55:05,150 DEBUG [RS:2;db1d50717577:45255 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T14:55:05,150 INFO [RS:2;db1d50717577:45255 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T14:55:05,150 INFO [RS:2;db1d50717577:45255 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T14:55:05,210 WARN [db1d50717577:36533 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
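The warning that closes the entry above ("No servers available; cannot place 1 unassigned regions.") together with the assignQueue processing further down shows the ordering: hbase:meta is queued for assignment before any region server has registered, so the assignment manager simply defers and retries once the live-server list is non-empty. A tiny sketch of that defer-then-pick behaviour follows; the random pick is an illustration, not the balancer's actual placement logic.

    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.ThreadLocalRandom;

    /** Defer placement while no servers are live; otherwise pick one (illustrative placement only). */
    public final class AssignWhenServersAvailable {

        static Optional<String> pickServer(List<String> liveServers) {
            if (liveServers.isEmpty()) {
                System.out.println("No servers available; cannot place 1 unassigned regions.");
                return Optional.empty();   // caller re-queues the region and retries later
            }
            return Optional.of(liveServers.get(ThreadLocalRandom.current().nextInt(liveServers.size())));
        }

        public static void main(String[] args) {
            System.out.println(pickServer(List.of()));                       // Optional.empty
            System.out.println(pickServer(List.of("rs-a", "rs-b", "rs-c"))); // one of the three servers
        }
    }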
2024-12-10T14:55:05,243 INFO [RS:0;db1d50717577:41775 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T14:55:05,243 INFO [RS:1;db1d50717577:37707 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T14:55:05,247 INFO [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C37707%2C1733842502956, suffix=, logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956, archiveDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs, maxLogs=32 2024-12-10T14:55:05,247 INFO [RS:0;db1d50717577:41775 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C41775%2C1733842502887, suffix=, logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,41775,1733842502887, archiveDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs, maxLogs=32 2024-12-10T14:55:05,251 INFO [RS:2;db1d50717577:45255 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T14:55:05,254 INFO [RS:2;db1d50717577:45255 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C45255%2C1733842503015, suffix=, logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,45255,1733842503015, archiveDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs, maxLogs=32 2024-12-10T14:55:05,266 DEBUG [RS:0;db1d50717577:41775 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,41775,1733842502887/db1d50717577%2C41775%2C1733842502887.1733842505252, exclude list is [], retry=0 2024-12-10T14:55:05,271 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39383,DS-0960cec0-88f5-4837-9180-abd7da2449be,DISK] 2024-12-10T14:55:05,271 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34057,DS-957b27d6-49d5-458e-948b-d34ad2b2482b,DISK] 2024-12-10T14:55:05,271 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39483,DS-3591a6d7-1349-4590-be89-c09cb9965731,DISK] 2024-12-10T14:55:05,272 DEBUG [RS:1;db1d50717577:37707 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956/db1d50717577%2C37707%2C1733842502956.1733842505252, exclude list is [], retry=0 2024-12-10T14:55:05,275 DEBUG [RS:2;db1d50717577:45255 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,45255,1733842503015/db1d50717577%2C45255%2C1733842503015.1733842505256, exclude list is [], retry=0 
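The WAL configuration entries above report blocksize=256 MB and rollsize=128 MB for each server's AsyncFSWAL. That 2:1 relationship is what a roll size of block size times a 0.5 multiplier would produce (hbase.regionserver.logroll.multiplier is commonly 0.5 by default), with the writer rolling to a new file once the current one reaches the roll size; treat the exact derivation here as an assumption read off these numbers. A small sketch of the roll check:

    /** Sketch of a WAL roll-size check: roll once the current WAL reaches blocksize x multiplier. */
    public final class WalRollCheck {

        static boolean shouldRoll(long currentWalBytes, long blockSizeBytes, double rollMultiplier) {
            long rollSize = (long) (blockSizeBytes * rollMultiplier);   // 256 MB x 0.5 = 128 MB
            return currentWalBytes >= rollSize;
        }

        public static void main(String[] args) {
            long blockSize = 256L * 1024 * 1024;   // 256 MB, as in the log
            double multiplier = 0.5;               // assumed hbase.regionserver.logroll.multiplier
            System.out.println(shouldRoll(100L * 1024 * 1024, blockSize, multiplier)); // false (below 128 MB)
            System.out.println(shouldRoll(130L * 1024 * 1024, blockSize, multiplier)); // true  (past 128 MB)
        }
    }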
2024-12-10T14:55:05,276 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34057,DS-957b27d6-49d5-458e-948b-d34ad2b2482b,DISK] 2024-12-10T14:55:05,276 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39483,DS-3591a6d7-1349-4590-be89-c09cb9965731,DISK] 2024-12-10T14:55:05,276 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39383,DS-0960cec0-88f5-4837-9180-abd7da2449be,DISK] 2024-12-10T14:55:05,279 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39383,DS-0960cec0-88f5-4837-9180-abd7da2449be,DISK] 2024-12-10T14:55:05,279 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34057,DS-957b27d6-49d5-458e-948b-d34ad2b2482b,DISK] 2024-12-10T14:55:05,279 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39483,DS-3591a6d7-1349-4590-be89-c09cb9965731,DISK] 2024-12-10T14:55:05,282 INFO [RS:0;db1d50717577:41775 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,41775,1733842502887/db1d50717577%2C41775%2C1733842502887.1733842505252 2024-12-10T14:55:05,285 DEBUG [RS:0;db1d50717577:41775 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34921:34921),(127.0.0.1/127.0.0.1:35659:35659),(127.0.0.1/127.0.0.1:39813:39813)] 2024-12-10T14:55:05,293 INFO [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956/db1d50717577%2C37707%2C1733842502956.1733842505252 2024-12-10T14:55:05,297 INFO [RS:2;db1d50717577:45255 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,45255,1733842503015/db1d50717577%2C45255%2C1733842503015.1733842505256 2024-12-10T14:55:05,300 DEBUG [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35659:35659),(127.0.0.1/127.0.0.1:39813:39813),(127.0.0.1/127.0.0.1:34921:34921)] 2024-12-10T14:55:05,304 DEBUG [RS:2;db1d50717577:45255 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34921:34921),(127.0.0.1/127.0.0.1:35659:35659),(127.0.0.1/127.0.0.1:39813:39813)] 2024-12-10T14:55:05,462 DEBUG [db1d50717577:36533 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T14:55:05,466 DEBUG [db1d50717577:36533 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:05,489 DEBUG 
[db1d50717577:36533 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:05,489 DEBUG [db1d50717577:36533 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:05,489 DEBUG [db1d50717577:36533 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:05,489 INFO [db1d50717577:36533 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:05,489 INFO [db1d50717577:36533 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:05,489 INFO [db1d50717577:36533 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:05,489 DEBUG [db1d50717577:36533 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:05,496 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:05,505 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db1d50717577,37707,1733842502956, state=OPENING 2024-12-10T14:55:05,511 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T14:55:05,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:05,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:05,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:05,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:05,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,516 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:05,520 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,520 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,708 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:05,710 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:55:05,714 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:59756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:55:05,731 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-10T14:55:05,731 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:55:05,732 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T14:55:05,735 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C37707%2C1733842502956.meta, suffix=.meta, logDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956, archiveDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs, maxLogs=32 2024-12-10T14:55:05,757 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956/db1d50717577%2C37707%2C1733842502956.meta.1733842505737.meta, exclude list is [], retry=0 2024-12-10T14:55:05,762 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34057,DS-957b27d6-49d5-458e-948b-d34ad2b2482b,DISK] 2024-12-10T14:55:05,762 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39483,DS-3591a6d7-1349-4590-be89-c09cb9965731,DISK] 2024-12-10T14:55:05,762 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39383,DS-0960cec0-88f5-4837-9180-abd7da2449be,DISK] 2024-12-10T14:55:05,766 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/WALs/db1d50717577,37707,1733842502956/db1d50717577%2C37707%2C1733842502956.meta.1733842505737.meta 2024-12-10T14:55:05,767 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35659:35659),(127.0.0.1/127.0.0.1:39813:39813),(127.0.0.1/127.0.0.1:34921:34921)] 2024-12-10T14:55:05,767 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:55:05,768 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-10T14:55:05,769 INFO 
[RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:05,770 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T14:55:05,771 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T14:55:05,772 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T14:55:05,782 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T14:55:05,782 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:05,782 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-10T14:55:05,782 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-10T14:55:05,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T14:55:05,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T14:55:05,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:05,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:05,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T14:55:05,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T14:55:05,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:05,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:05,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T14:55:05,794 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T14:55:05,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:05,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:55:05,797 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740 2024-12-10T14:55:05,800 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740 
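
The CompactionConfiguration entries above dump the effective compaction tuning for each column family of hbase:meta (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). Values like these are normally tuned through site configuration; the short sketch below only illustrates that, using the hbase.hstore.compaction.* property names as commonly documented — those key names are an assumption here, not something shown in the log, and the values simply mirror the output above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Property names assumed from the usual HBase documentation; values mirror the log lines above.
        conf.setInt("hbase.hstore.compaction.min", 3);     // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);    // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }
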
2024-12-10T14:55:05,803 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T14:55:05,806 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T14:55:05,808 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71044731, jitterRate=0.05864898860454559}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T14:55:05,811 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T14:55:05,820 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733842505701 2024-12-10T14:55:05,838 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T14:55:05,839 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-10T14:55:05,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:05,842 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db1d50717577,37707,1733842502956, state=OPEN 2024-12-10T14:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:55:05,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:55:05,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:55:05,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T14:55:05,853 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=db1d50717577,37707,1733842502956 in 329 msec 2024-12-10T14:55:05,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T14:55:05,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 812 msec 2024-12-10T14:55:05,866 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2120 sec 2024-12-10T14:55:05,866 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733842505866, completionTime=-1 2024-12-10T14:55:05,866 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T14:55:05,867 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-10T14:55:05,912 DEBUG [hconnection-0x50bdc134-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:05,915 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:05,930 INFO [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-10T14:55:05,930 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733842565930 2024-12-10T14:55:05,930 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733842625930 2024-12-10T14:55:05,930 INFO [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 63 msec 2024-12-10T14:55:05,964 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:55:05,974 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,974 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
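
The entries above show the master publishing the hbase:meta location to the /hbase/meta-region-server znode and the MetaRegionLocationCache picking up the change. For context, a client sees the same information through the standard RegionLocator API; this is only an illustrative sketch against a generic Configuration, not part of the test run recorded here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Location of the single hbase:meta region, looked up by the empty start row.
          HRegionLocation location = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is hosted on " + location.getServerName());
        }
      }
    }
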
2024-12-10T14:55:05,974 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,976 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db1d50717577:36533, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,976 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:05,984 DEBUG [master/db1d50717577:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-10T14:55:05,988 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-10T14:55:05,990 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T14:55:05,996 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-10T14:55:05,999 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:06,000 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:06,002 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:06,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741837_1013 (size=358) 2024-12-10T14:55:06,025 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 763faff5dd874fa35c8dd37f5437c5e6, NAME => 'hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:06,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741837_1013 (size=358) 2024-12-10T14:55:06,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741837_1013 (size=358) 2024-12-10T14:55:06,043 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741838_1014 (size=42) 2024-12-10T14:55:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741838_1014 (size=42) 2024-12-10T14:55:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741838_1014 (size=42) 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 763faff5dd874fa35c8dd37f5437c5e6, disabling compactions & flushes 2024-12-10T14:55:06,046 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. after waiting 0 ms 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,046 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,046 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 763faff5dd874fa35c8dd37f5437c5e6: 2024-12-10T14:55:06,049 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:06,059 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733842506051"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842506051"}]},"ts":"1733842506051"} 2024-12-10T14:55:06,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
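
The CreateTableProcedure above prints the full descriptor it writes for hbase:namespace (a single 'info' family with VERSIONS => '10', IN_MEMORY => 'true', BLOOMFILTER => 'ROW', BLOCKSIZE => 8192). A user table of the same shape could be declared through the public Admin API roughly as sketched below; the table name is hypothetical, and the table-level key hbase.hregion.percolumnfamilyflush.size.lower.bound is copied verbatim from the earlier FlushLargeStoresPolicy entry, which noted that hbase:meta leaves it unset and falls back to memstore-flush-size divided by the number of families.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // 'info' family mirroring the attributes the master printed for hbase:namespace.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(10)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROW)
            .build();
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_ns_like")) // hypothetical name
            // Key taken from the FlushLargeStoresPolicy log entry above; 16 MB is just an example value.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", String.valueOf(16L * 1024 * 1024))
            .setColumnFamily(info)
            .build();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.createTable(table); // drives a CreateTableProcedure like pid=4 above
        }
      }
    }
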
2024-12-10T14:55:06,091 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:06,095 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842506091"}]},"ts":"1733842506091"} 2024-12-10T14:55:06,099 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-10T14:55:06,103 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:06,105 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:06,105 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:06,105 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:06,105 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:06,105 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:06,105 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:06,105 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:06,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=763faff5dd874fa35c8dd37f5437c5e6, ASSIGN}] 2024-12-10T14:55:06,110 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=763faff5dd874fa35c8dd37f5437c5e6, ASSIGN 2024-12-10T14:55:06,112 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=763faff5dd874fa35c8dd37f5437c5e6, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:55:06,263 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-10T14:55:06,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=763faff5dd874fa35c8dd37f5437c5e6, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:06,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 763faff5dd874fa35c8dd37f5437c5e6, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:06,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:06,421 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:55:06,424 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:55:06,433 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,433 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 763faff5dd874fa35c8dd37f5437c5e6, NAME => 'hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:55:06,434 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. service=AccessControlService 2024-12-10T14:55:06,434 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
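
The region open above again registers org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor (priority 536870911). System coprocessors of this kind are normally wired in through the coprocessor host configuration; a minimal sketch follows, with the class name copied from the log and the key names hbase.coprocessor.region.classes / hbase.coprocessor.master.classes assumed from common HBase documentation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CoprocessorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Load the AccessController on every region (class name as it appears in the log above).
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        // Master-side hook uses a parallel key; both key names are assumptions, not taken from the log.
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }
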
2024-12-10T14:55:06,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:06,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,435 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,438 INFO [StoreOpener-763faff5dd874fa35c8dd37f5437c5e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,442 INFO [StoreOpener-763faff5dd874fa35c8dd37f5437c5e6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 763faff5dd874fa35c8dd37f5437c5e6 columnFamilyName info 2024-12-10T14:55:06,442 DEBUG [StoreOpener-763faff5dd874fa35c8dd37f5437c5e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:06,443 INFO [StoreOpener-763faff5dd874fa35c8dd37f5437c5e6-1 {}] regionserver.HStore(327): Store=763faff5dd874fa35c8dd37f5437c5e6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:06,445 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,446 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,451 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:55:06,456 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:06,457 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 763faff5dd874fa35c8dd37f5437c5e6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64792749, jitterRate=-0.034512802958488464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:06,459 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 763faff5dd874fa35c8dd37f5437c5e6: 2024-12-10T14:55:06,461 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6., pid=6, masterSystemTime=1733842506421 2024-12-10T14:55:06,465 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:55:06,465 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 
2024-12-10T14:55:06,467 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=763faff5dd874fa35c8dd37f5437c5e6, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:06,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T14:55:06,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 763faff5dd874fa35c8dd37f5437c5e6, server=db1d50717577,45255,1733842503015 in 207 msec 2024-12-10T14:55:06,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T14:55:06,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=763faff5dd874fa35c8dd37f5437c5e6, ASSIGN in 374 msec 2024-12-10T14:55:06,487 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:06,488 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842506487"}]},"ts":"1733842506487"} 2024-12-10T14:55:06,491 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-10T14:55:06,495 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:06,499 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-10T14:55:06,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 506 msec 2024-12-10T14:55:06,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:06,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:06,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:06,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:55:06,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:06,527 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:06,529 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47736, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:06,543 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-10T14:55:06,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:55:06,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 34 msec 2024-12-10T14:55:06,582 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-10T14:55:06,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:55:06,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 21 msec 2024-12-10T14:55:06,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-10T14:55:06,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-10T14:55:06,626 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.520sec 2024-12-10T14:55:06,634 INFO [master/db1d50717577:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T14:55:06,636 INFO [master/db1d50717577:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T14:55:06,637 INFO [master/db1d50717577:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T14:55:06,638 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T14:55:06,638 INFO [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T14:55:06,639 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
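
The two CreateNamespaceProcedure entries above create the built-in default and hbase namespaces during master startup. An application-defined namespace goes through the same machinery via the Admin API; the namespace name below is hypothetical and the sketch is only an illustration of that call path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Triggers a CreateNamespaceProcedure on the master, like the 'default'/'hbase' ones logged above.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
      }
    }
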
2024-12-10T14:55:06,640 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T14:55:06,698 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T14:55:06,701 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-10T14:55:06,702 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e74344 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76ebc6b4 2024-12-10T14:55:06,703 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:06,704 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:06,704 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-10T14:55:06,704 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-10T14:55:06,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:06,709 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:55:06,747 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c05315, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:06,759 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T14:55:06,759 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T14:55:06,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741839_1015 (size=349) 2024-12-10T14:55:06,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741839_1015 (size=349) 2024-12-10T14:55:06,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741839_1015 (size=349) 2024-12-10T14:55:06,777 DEBUG [hconnection-0x35b717f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:06,785 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 99bb2446a6b84f36bf016b19fe4722e6, NAME => 'hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:06,800 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:06,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=db1d50717577,36533,1733842502047 2024-12-10T14:55:06,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
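
The WARN above flags ZKConnectionRegistry as deprecated and points at the client.rpcconnectionregistry section of the reference guide. The test itself still resolves the cluster through the ZooKeeper quorum visible in the ZKWatcher lines (127.0.0.1:57176); a plain client connection along those lines looks roughly like the sketch below. The quorum and port values are the test's ephemeral ones and stand in only as placeholders, and the property names for the newer RPC-based registry are deliberately not reproduced here — take them from the linked book section.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ZkQuorumConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper endpoint as reported by the ZKWatcher lines above (an ephemeral test port).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57176");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          for (ServerName server : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            System.out.println("live region server: " + server);
          }
        }
      }
    }
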
2024-12-10T14:55:06,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/test.cache.data in system properties and HBase conf 2024-12-10T14:55:06,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T14:55:06,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/nfs.dump.dir in system properties and HBase conf 2024-12-10T14:55:06,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir in system properties and HBase conf 2024-12-10T14:55:06,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T14:55:06,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T14:55:06,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T14:55:06,811 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:55:06,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741840_1016 (size=36) 2024-12-10T14:55:06,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741840_1016 (size=36) 2024-12-10T14:55:06,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741840_1016 (size=36) 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 
{}] regionserver.HRegion(894): Instantiated hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing 99bb2446a6b84f36bf016b19fe4722e6, disabling compactions & flushes 2024-12-10T14:55:06,837 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. after waiting 0 ms 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:06,837 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:06,837 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 99bb2446a6b84f36bf016b19fe4722e6: 2024-12-10T14:55:06,841 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:06,841 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733842506841"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842506841"}]},"ts":"1733842506841"} 2024-12-10T14:55:06,845 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
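
Everything in this section is emitted by HBaseTestingUtility, which at this point reports the minicluster as up and starts wiring a mini MapReduce cluster next to it (the long run of "Setting ... in system properties and HBase conf" lines above). A harness producing output like this is typically driven roughly as sketched below; the exact method names, startMiniMapReduceCluster and its shutdown counterpart in particular, are assumed from the utility class named in the log and should be checked against the HBase test jars in use.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterHarnessSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Three region servers, matching the RS:0/RS:1/RS:2 threads seen in the log.
        util.startMiniCluster(3);
        // Assumed API: starts the mini MapReduce cluster announced at 14:55:06,804.
        util.startMiniMapReduceCluster();
        try {
          // ... run MapReduce-over-HBase test code here ...
        } finally {
          util.shutdownMiniMapReduceCluster();
          util.shutdownMiniCluster();
        }
      }
    }
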
2024-12-10T14:55:06,848 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:06,848 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842506848"}]},"ts":"1733842506848"} 2024-12-10T14:55:06,852 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-10T14:55:06,857 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:06,859 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:06,859 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:06,859 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:06,859 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:06,859 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:06,859 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:06,859 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:06,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=99bb2446a6b84f36bf016b19fe4722e6, ASSIGN}] 2024-12-10T14:55:06,862 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=99bb2446a6b84f36bf016b19fe4722e6, ASSIGN 2024-12-10T14:55:06,865 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=99bb2446a6b84f36bf016b19fe4722e6, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:55:06,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741841_1017 (size=592039) 2024-12-10T14:55:06,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741841_1017 (size=592039) 2024-12-10T14:55:06,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741841_1017 (size=592039) 2024-12-10T14:55:06,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741842_1018 (size=1663647) 2024-12-10T14:55:06,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741842_1018 (size=1663647) 2024-12-10T14:55:06,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741842_1018 (size=1663647) 2024-12-10T14:55:07,012 DEBUG [master/db1d50717577:0:becomeActiveMaster 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:55:07,015 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T14:55:07,016 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=99bb2446a6b84f36bf016b19fe4722e6, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:07,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 99bb2446a6b84f36bf016b19fe4722e6, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:55:07,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:55:07,190 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:55:07,213 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:55:07,227 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:07,227 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 99bb2446a6b84f36bf016b19fe4722e6, NAME => 'hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:55:07,232 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. service=AccessControlService 2024-12-10T14:55:07,232 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:55:07,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:07,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,245 INFO [StoreOpener-99bb2446a6b84f36bf016b19fe4722e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,251 INFO [StoreOpener-99bb2446a6b84f36bf016b19fe4722e6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99bb2446a6b84f36bf016b19fe4722e6 columnFamilyName l 2024-12-10T14:55:07,251 DEBUG [StoreOpener-99bb2446a6b84f36bf016b19fe4722e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:07,252 INFO [StoreOpener-99bb2446a6b84f36bf016b19fe4722e6-1 {}] regionserver.HStore(327): Store=99bb2446a6b84f36bf016b19fe4722e6/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:07,260 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,262 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,269 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:55:07,288 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:07,290 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 99bb2446a6b84f36bf016b19fe4722e6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70403581, jitterRate=0.04909510910511017}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:07,295 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 99bb2446a6b84f36bf016b19fe4722e6: 2024-12-10T14:55:07,297 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6., pid=11, masterSystemTime=1733842507189 2024-12-10T14:55:07,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:55:07,301 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 
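[editor's note] The entries above trace CreateTableProcedure pid=9 for hbase:acl from region assignment (pid=10) through the region open on db1d50717577,41775. As an illustrative aside only (not the test's own code), the same outcome can be observed from a client through the public HBase 2.x Admin API; the ZooKeeper quorum below is a placeholder assumption.

// Minimal sketch: confirm that hbase:acl exists and is ENABLED after the
// CreateTableProcedure logged above completes.
// Assumptions: an HBase 2.x client on the classpath and a ZooKeeper quorum
// reachable at localhost:2181 (placeholder value).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CheckAclTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost:2181"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName acl = TableName.valueOf("hbase:acl");
      // Both calls read table state from hbase:meta, the same rows the master
      // is shown updating (ENABLING -> ENABLED) in the log entries above.
      System.out.println("exists=" + admin.tableExists(acl)
          + " enabled=" + admin.isTableEnabled(acl));
    }
  }
}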
2024-12-10T14:55:07,302 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=99bb2446a6b84f36bf016b19fe4722e6, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:07,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T14:55:07,312 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:55:07,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 99bb2446a6b84f36bf016b19fe4722e6, server=db1d50717577,41775,1733842502887 in 285 msec 2024-12-10T14:55:07,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-10T14:55:07,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=99bb2446a6b84f36bf016b19fe4722e6, ASSIGN in 452 msec 2024-12-10T14:55:07,318 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:07,318 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842507318"}]},"ts":"1733842507318"} 2024-12-10T14:55:07,322 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-10T14:55:07,326 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:07,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 628 msec 2024-12-10T14:55:07,812 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:55:07,813 INFO [master/db1d50717577:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-10T14:55:07,833 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-10T14:55:07,834 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T14:55:07,835 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,36533,1733842502047-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:55:08,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:08,818 WARN [Thread-398 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:09,213 INFO [Thread-398 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:55:09,214 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-10T14:55:09,216 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:55:09,279 INFO [Thread-398 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:55:09,279 INFO [Thread-398 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:55:09,280 INFO [Thread-398 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:55:09,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:55:09,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:55:09,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:55:09,288 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@136ae407{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:55:09,288 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49ebb85c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:09,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:09,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74dfb21{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:55:09,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fe36ded{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:09,482 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-10T14:55:09,483 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-10T14:55:09,483 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-10T14:55:09,485 INFO [Thread-398 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-10T14:55:09,544 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:09,962 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:10,385 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:10,410 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71eb319{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-40701-hadoop-yarn-common-3_4_1_jar-_-any-9219127590921109726/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-10T14:55:10,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c9b4752{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-41105-hadoop-yarn-common-3_4_1_jar-_-any-17863548164234686232/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-10T14:55:10,411 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f5514d3{HTTP/1.1, (http/1.1)}{localhost:41105} 2024-12-10T14:55:10,411 INFO [Thread-398 {}] server.AbstractConnector(333): Started ServerConnector@5c6d1600{HTTP/1.1, (http/1.1)}{localhost:40701} 
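[editor's note] The repeated AuthenticationFilter warnings in this section ("Unable to initialize FileSignerSecretProvider, falling back to use random secrets") are harmless for a throwaway mini cluster: the signer secret file at /home/jenkins/hadoop-http-auth-signature-secret simply does not exist. As a hedged sketch only, pointing Hadoop's standard HTTP authentication property at a readable secret file would silence them; the file path used here is an assumption, not something taken from the test environment.

// Illustrative only: supply a signer secret file so Hadoop's HTTP
// AuthenticationFilter does not fall back to random secrets.
// Assumption: /etc/security/http-secret exists and is readable by the process.
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretConfig {
  public static Configuration withSignatureSecret(Configuration conf) {
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/etc/security/http-secret"); // hypothetical path
    return conf;
  }
}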
2024-12-10T14:55:10,411 INFO [Thread-398 {}] server.Server(415): Started @15383ms 2024-12-10T14:55:10,411 INFO [Time-limited test {}] server.Server(415): Started @15383ms 2024-12-10T14:55:10,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741843_1019 (size=5) 2024-12-10T14:55:10,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741843_1019 (size=5) 2024-12-10T14:55:10,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741843_1019 (size=5) 2024-12-10T14:55:11,297 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:55:11,455 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-10T14:55:11,458 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T14:55:11,459 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-10T14:55:11,800 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-10T14:55:11,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:11,882 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-10T14:55:11,883 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:55:11,893 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:55:11,893 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:55:11,893 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:55:11,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:11,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3604227f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:55:11,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f4563e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:11,980 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-10T14:55:11,980 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-10T14:55:11,980 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-10T14:55:11,980 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-10T14:55:11,992 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,014 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,154 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ca3f0e8{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-35559-hadoop-yarn-common-3_4_1_jar-_-any-7672626435055306321/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-10T14:55:12,182 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17f11178{HTTP/1.1, (http/1.1)}{localhost:35559} 2024-12-10T14:55:12,182 INFO [Time-limited test {}] server.Server(415): Started @17154ms 2024-12-10T14:55:12,414 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-10T14:55:12,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:12,448 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-10T14:55:12,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T14:55:12,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T14:55:12,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T14:55:12,457 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T14:55:12,458 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T14:55:12,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68541e95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T14:55:12,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5946649e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-10T14:55:12,522 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-10T14:55:12,523 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-10T14:55:12,523 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-10T14:55:12,523 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-10T14:55:12,532 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,541 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:12,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-10T14:55:12,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T14:55:12,594 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:12,595 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:12,595 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-10T14:55:12,595 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T14:55:12,595 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:12,596 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-10T14:55:12,596 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:12,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-10T14:55:12,598 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:12,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:12,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-10T14:55:12,600 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-10T14:55:12,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-10T14:55:12,600 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T14:55:12,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T14:55:12,645 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding 
org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-10T14:55:12,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fdaa78e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/java.io.tmpdir/jetty-localhost-35567-hadoop-yarn-common-3_4_1_jar-_-any-1464753541513952732/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-10T14:55:12,653 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31762410{HTTP/1.1, (http/1.1)}{localhost:35567} 2024-12-10T14:55:12,653 INFO [Time-limited test {}] server.Server(415): Started @17625ms 2024-12-10T14:55:12,684 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-10T14:55:12,686 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:12,718 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=726, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=3437 2024-12-10T14:55:12,719 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=726 is superior to 500 2024-12-10T14:55:12,731 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:55:12,734 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:55:12,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:55:12,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:12,744 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:12,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-10T14:55:12,744 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:12,746 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; 
CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:12,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:55:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741844_1020 (size=406) 2024-12-10T14:55:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741844_1020 (size=406) 2024-12-10T14:55:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741844_1020 (size=406) 2024-12-10T14:55:12,763 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e2dc0ecfbe8bd1c496953eed3786cf3c, NAME => 'testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:12,764 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 5ecadaf55e885f390398f6ef1e12d757, NAME => 'testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:12,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741846_1022 (size=67) 2024-12-10T14:55:12,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741846_1022 (size=67) 2024-12-10T14:55:12,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741846_1022 (size=67) 2024-12-10T14:55:12,785 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:12,785 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing e2dc0ecfbe8bd1c496953eed3786cf3c, 
disabling compactions & flushes 2024-12-10T14:55:12,785 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:12,785 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:12,785 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. after waiting 0 ms 2024-12-10T14:55:12,786 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:12,786 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:12,786 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for e2dc0ecfbe8bd1c496953eed3786cf3c: 2024-12-10T14:55:12,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741845_1021 (size=67) 2024-12-10T14:55:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741845_1021 (size=67) 2024-12-10T14:55:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741845_1021 (size=67) 2024-12-10T14:55:12,790 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:12,790 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 5ecadaf55e885f390398f6ef1e12d757, disabling compactions & flushes 2024-12-10T14:55:12,790 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:12,790 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:12,790 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. after waiting 0 ms 2024-12-10T14:55:12,790 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:12,790 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 
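[editor's note] The entries above show the master writing the filesystem layout for testtb-testExportWithTargetName: a single 'cf' family and two regions split at row key '1' (STARTKEY ''..'1' and '1'..''). For orientation only, a comparable table can be created from a client with the 2.x Admin API; this is a hedged sketch, not the SecureTestUtil code path the test actually uses.

// Minimal sketch: create a table with one column family "cf" and a single
// split point "1", mirroring the two-region layout created in the log above.
// Assumption: the default client configuration resolves to the target cluster.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the create request above
              .build())
          .build();
      byte[][] splitKeys = { Bytes.toBytes("1") }; // regions: (''..'1') and ('1'..'')
      admin.createTable(desc, splitKeys);          // blocks until the table is ENABLED
    }
  }
}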
2024-12-10T14:55:12,791 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 5ecadaf55e885f390398f6ef1e12d757: 2024-12-10T14:55:12,792 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:12,793 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733842512792"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842512792"}]},"ts":"1733842512792"} 2024-12-10T14:55:12,793 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733842512792"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842512792"}]},"ts":"1733842512792"} 2024-12-10T14:55:12,830 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:55:12,833 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:12,834 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842512833"}]},"ts":"1733842512833"} 2024-12-10T14:55:12,842 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-10T14:55:12,847 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:55:12,850 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:12,850 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:12,850 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:12,850 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:12,850 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:12,850 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:12,850 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:12,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, ASSIGN}] 2024-12-10T14:55:12,852 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, ASSIGN 2024-12-10T14:55:12,852 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, ASSIGN 2024-12-10T14:55:12,855 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:55:12,855 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:55:13,006 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:55:13,006 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=e2dc0ecfbe8bd1c496953eed3786cf3c, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:13,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=5ecadaf55e885f390398f6ef1e12d757, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:13,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=13, state=RUNNABLE; OpenRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:13,020 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=14, state=RUNNABLE; OpenRegionProcedure 5ecadaf55e885f390398f6ef1e12d757, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:55:13,166 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:13,175 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:13,190 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 
2024-12-10T14:55:13,190 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => e2dc0ecfbe8bd1c496953eed3786cf3c, NAME => 'testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:55:13,191 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. service=AccessControlService 2024-12-10T14:55:13,191 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:13,192 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,192 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:13,192 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,192 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,195 INFO [StoreOpener-e2dc0ecfbe8bd1c496953eed3786cf3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,198 INFO [StoreOpener-e2dc0ecfbe8bd1c496953eed3786cf3c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2dc0ecfbe8bd1c496953eed3786cf3c columnFamilyName cf 2024-12-10T14:55:13,198 DEBUG [StoreOpener-e2dc0ecfbe8bd1c496953eed3786cf3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:13,198 INFO [StoreOpener-e2dc0ecfbe8bd1c496953eed3786cf3c-1 {}] regionserver.HStore(327): Store=e2dc0ecfbe8bd1c496953eed3786cf3c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:13,200 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,200 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:13,201 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 5ecadaf55e885f390398f6ef1e12d757, NAME => 'testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:55:13,201 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,201 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. service=AccessControlService 2024-12-10T14:55:13,201 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:55:13,201 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,202 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:13,202 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,202 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,204 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,204 INFO [StoreOpener-5ecadaf55e885f390398f6ef1e12d757-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,207 INFO [StoreOpener-5ecadaf55e885f390398f6ef1e12d757-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ecadaf55e885f390398f6ef1e12d757 columnFamilyName cf 2024-12-10T14:55:13,207 DEBUG [StoreOpener-5ecadaf55e885f390398f6ef1e12d757-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:13,208 INFO [StoreOpener-5ecadaf55e885f390398f6ef1e12d757-1 {}] regionserver.HStore(327): Store=5ecadaf55e885f390398f6ef1e12d757/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:13,208 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:13,209 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,209 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened e2dc0ecfbe8bd1c496953eed3786cf3c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64601433, jitterRate=-0.03736363351345062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:13,210 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,211 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for e2dc0ecfbe8bd1c496953eed3786cf3c: 2024-12-10T14:55:13,213 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c., pid=15, masterSystemTime=1733842513165 2024-12-10T14:55:13,213 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,216 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:13,216 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 
2024-12-10T14:55:13,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=e2dc0ecfbe8bd1c496953eed3786cf3c, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:13,218 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:13,220 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 5ecadaf55e885f390398f6ef1e12d757; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63291309, jitterRate=-0.05688600242137909}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:13,220 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 5ecadaf55e885f390398f6ef1e12d757: 2024-12-10T14:55:13,222 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757., pid=16, masterSystemTime=1733842513175 2024-12-10T14:55:13,225 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:13,225 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 
2024-12-10T14:55:13,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=13 2024-12-10T14:55:13,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=13, state=SUCCESS; OpenRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c, server=db1d50717577,37707,1733842502956 in 212 msec 2024-12-10T14:55:13,226 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=5ecadaf55e885f390398f6ef1e12d757, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:13,228 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, ASSIGN in 376 msec 2024-12-10T14:55:13,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=14 2024-12-10T14:55:13,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=14, state=SUCCESS; OpenRegionProcedure 5ecadaf55e885f390398f6ef1e12d757, server=db1d50717577,45255,1733842503015 in 209 msec 2024-12-10T14:55:13,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=12 2024-12-10T14:55:13,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, ASSIGN in 384 msec 2024-12-10T14:55:13,241 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:13,241 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842513241"}]},"ts":"1733842513241"} 2024-12-10T14:55:13,244 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-10T14:55:13,247 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:13,252 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-10T14:55:13,257 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,264 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,269 DEBUG [hconnection-0x2b7dab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,270 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,277 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], 
kv [jenkins: RWXCA] 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:13,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:55:13,310 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:13,310 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:13,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:13,318 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:13,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 570 msec 2024-12-10T14:55:13,321 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with 
data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:55:13,356 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-10T14:55:13,356 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-10T14:55:13,357 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:13,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-10T14:55:13,363 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:13,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-10T14:55:13,374 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-10T14:55:13,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842513374 (current time:1733842513374). 2024-12-10T14:55:13,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:55:13,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-10T14:55:13,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:55:13,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f7ea9cb to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b6653f8 2024-12-10T14:55:13,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6496fc83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:13,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,385 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f7ea9cb to 127.0.0.1:57176 
2024-12-10T14:55:13,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:13,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x606a6df6 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fe91150 2024-12-10T14:55:13,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb82ecb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:13,397 DEBUG [hconnection-0xbdc47d1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,398 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,402 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x606a6df6 to 127.0.0.1:57176 2024-12-10T14:55:13,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:13,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-10T14:55:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-10T14:55:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-10T14:55:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-10T14:55:13,429 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:55:13,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-10T14:55:13,434 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:55:13,449 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:55:13,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741847_1023 (size=167) 2024-12-10T14:55:13,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741847_1023 (size=167) 2024-12-10T14:55:13,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741847_1023 (size=167) 2024-12-10T14:55:13,462 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:55:13,465 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757}] 2024-12-10T14:55:13,469 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,469 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=17 2024-12-10T14:55:13,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:13,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:13,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-10T14:55:13,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:13,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-10T14:55:13,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:13,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for e2dc0ecfbe8bd1c496953eed3786cf3c: 2024-12-10T14:55:13,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 5ecadaf55e885f390398f6ef1e12d757: 2024-12-10T14:55:13,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. for emptySnaptb0-testExportWithTargetName completed. 2024-12-10T14:55:13,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. for emptySnaptb0-testExportWithTargetName completed. 2024-12-10T14:55:13,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:13,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:13,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:13,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:13,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:55:13,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:55:13,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741849_1025 (size=70) 2024-12-10T14:55:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741848_1024 (size=70) 2024-12-10T14:55:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741849_1025 (size=70) 2024-12-10T14:55:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741848_1024 (size=70) 2024-12-10T14:55:13,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741848_1024 (size=70) 2024-12-10T14:55:13,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741849_1025 (size=70) 2024-12-10T14:55:13,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:13,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-10T14:55:13,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 
2024-12-10T14:55:13,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-10T14:55:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-10T14:55:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-10T14:55:13,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,664 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,664 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 in 200 msec 2024-12-10T14:55:13,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-10T14:55:13,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c in 200 msec 2024-12-10T14:55:13,670 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:55:13,672 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:55:13,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:55:13,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:13,679 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:13,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741850_1026 (size=549) 
2024-12-10T14:55:13,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741850_1026 (size=549) 2024-12-10T14:55:13,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741850_1026 (size=549) 2024-12-10T14:55:13,705 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:55:13,717 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:55:13,718 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:13,720 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:55:13,721 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-10T14:55:13,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 298 msec 2024-12-10T14:55:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-10T14:55:13,735 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-10T14:55:13,761 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-10T14:55:13,764 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:55:13,775 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-10T14:55:13,776 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:13,777 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:13,824 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-10T14:55:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842513824 (current time:1733842513824). 2024-12-10T14:55:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:55:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-10T14:55:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:55:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67ffd640 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c56f322 2024-12-10T14:55:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@545e45c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,837 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67ffd640 to 127.0.0.1:57176 2024-12-10T14:55:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ffeb4f to 127.0.0.1:57176 with 
session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cabc171 2024-12-10T14:55:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a7dc94c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:13,857 DEBUG [hconnection-0x7f0bde1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,858 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:13,862 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:13,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ffeb4f to 127.0.0.1:57176 2024-12-10T14:55:13,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:13,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-10T14:55:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-10T14:55:13,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-10T14:55:13,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-10T14:55:13,871 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:55:13,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:55:13,873 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:55:13,878 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:55:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741851_1027 (size=162) 2024-12-10T14:55:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741851_1027 (size=162) 2024-12-10T14:55:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741851_1027 (size=162) 2024-12-10T14:55:13,900 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:55:13,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757}] 2024-12-10T14:55:13,902 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:13,902 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=20 2024-12-10T14:55:14,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:14,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:14,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-10T14:55:14,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-10T14:55:14,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:14,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 5ecadaf55e885f390398f6ef1e12d757 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-10T14:55:14,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:14,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing e2dc0ecfbe8bd1c496953eed3786cf3c 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-10T14:55:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:55:14,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/.tmp/cf/5ac41715479e45c2a5c261a68b015e05 is 71, key is 0090871de0dfa926b0e3188142ffe5e7/cf:q/1733842513762/Put/seqid=0 2024-12-10T14:55:14,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/.tmp/cf/e167e54369bb48a6863ed67fa7581e23 is 71, key is 13b6991d520f5e91f3f27b37b2b9ea9b/cf:q/1733842513767/Put/seqid=0 2024-12-10T14:55:14,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741852_1028 (size=5354) 2024-12-10T14:55:14,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741852_1028 (size=5354) 2024-12-10T14:55:14,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741852_1028 (size=5354) 2024-12-10T14:55:14,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/.tmp/cf/5ac41715479e45c2a5c261a68b015e05 2024-12-10T14:55:14,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741853_1029 (size=8256) 2024-12-10T14:55:14,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741853_1029 (size=8256) 2024-12-10T14:55:14,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741853_1029 (size=8256) 2024-12-10T14:55:14,332 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/.tmp/cf/e167e54369bb48a6863ed67fa7581e23 2024-12-10T14:55:14,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/.tmp/cf/5ac41715479e45c2a5c261a68b015e05 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05 2024-12-10T14:55:14,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/.tmp/cf/e167e54369bb48a6863ed67fa7581e23 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23 2024-12-10T14:55:14,383 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05, entries=4, sequenceid=6, filesize=5.2 K 2024-12-10T14:55:14,386 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23, entries=46, sequenceid=6, filesize=8.1 K 2024-12-10T14:55:14,387 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 
e2dc0ecfbe8bd1c496953eed3786cf3c in 300ms, sequenceid=6, compaction requested=false 2024-12-10T14:55:14,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-10T14:55:14,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for e2dc0ecfbe8bd1c496953eed3786cf3c: 2024-12-10T14:55:14,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. for snaptb0-testExportWithTargetName completed. 2024-12-10T14:55:14,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-10T14:55:14,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:14,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05] hfiles 2024-12-10T14:55:14,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05 for snapshot=snaptb0-testExportWithTargetName 2024-12-10T14:55:14,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 5ecadaf55e885f390398f6ef1e12d757 in 318ms, sequenceid=6, compaction requested=false 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 5ecadaf55e885f390398f6ef1e12d757: 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. for snaptb0-testExportWithTargetName completed. 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23] hfiles 2024-12-10T14:55:14,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23 for snapshot=snaptb0-testExportWithTargetName 2024-12-10T14:55:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741855_1031 (size=109) 2024-12-10T14:55:14,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741855_1031 (size=109) 2024-12-10T14:55:14,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741855_1031 (size=109) 2024-12-10T14:55:14,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:14,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-10T14:55:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-10T14:55:14,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741854_1030 (size=109) 2024-12-10T14:55:14,436 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:14,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741854_1030 (size=109) 2024-12-10T14:55:14,436 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:14,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741854_1030 (size=109) 2024-12-10T14:55:14,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 
2024-12-10T14:55:14,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-10T14:55:14,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-10T14:55:14,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:14,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:14,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 5ecadaf55e885f390398f6ef1e12d757 in 538 msec 2024-12-10T14:55:14,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-10T14:55:14,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c in 543 msec 2024-12-10T14:55:14,446 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:55:14,448 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:55:14,450 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:55:14,450 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-10T14:55:14,452 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-10T14:55:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:55:14,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741856_1032 (size=627) 2024-12-10T14:55:14,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741856_1032 (size=627) 2024-12-10T14:55:14,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741856_1032 (size=627) 2024-12-10T14:55:14,499 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:55:14,509 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:55:14,510 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-10T14:55:14,514 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:55:14,514 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-10T14:55:14,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 646 msec 2024-12-10T14:55:14,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:55:14,982 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-10T14:55:14,982 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982 2024-12-10T14:55:14,982 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:15,030 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:15,030 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-10T14:55:15,036 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:55:15,044 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-10T14:55:15,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741857_1033 (size=627) 2024-12-10T14:55:15,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741857_1033 (size=627) 2024-12-10T14:55:15,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741857_1033 (size=627) 2024-12-10T14:55:15,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741858_1034 (size=162) 2024-12-10T14:55:15,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741858_1034 (size=162) 2024-12-10T14:55:15,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741858_1034 (size=162) 2024-12-10T14:55:15,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741859_1035 (size=154) 2024-12-10T14:55:15,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741859_1035 (size=154) 2024-12-10T14:55:15,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741859_1035 (size=154) 2024-12-10T14:55:15,095 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:15,096 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:15,096 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:15,096 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-10604054487716406999.jar 2024-12-10T14:55:16,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-3338322457895986152.jar 2024-12-10T14:55:16,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:16,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:55:16,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:55:16,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:55:16,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:55:16,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:55:16,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:55:16,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:55:16,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:55:16,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:55:16,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:55:16,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:55:16,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:55:16,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:16,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:16,453 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:16,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:16,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:16,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:16,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741860_1036 (size=127628) 2024-12-10T14:55:16,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741860_1036 (size=127628) 2024-12-10T14:55:16,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741860_1036 (size=127628) 2024-12-10T14:55:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741861_1037 (size=2172101) 2024-12-10T14:55:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741861_1037 (size=2172101) 2024-12-10T14:55:16,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741861_1037 (size=2172101) 2024-12-10T14:55:16,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741862_1038 (size=213228) 2024-12-10T14:55:16,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741862_1038 (size=213228) 2024-12-10T14:55:16,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741862_1038 (size=213228) 2024-12-10T14:55:16,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741863_1039 (size=1877034) 2024-12-10T14:55:16,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741863_1039 (size=1877034) 2024-12-10T14:55:16,907 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741863_1039 (size=1877034) 2024-12-10T14:55:16,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741864_1040 (size=533455) 2024-12-10T14:55:16,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741864_1040 (size=533455) 2024-12-10T14:55:16,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741864_1040 (size=533455) 2024-12-10T14:55:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741865_1041 (size=7280644) 2024-12-10T14:55:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741865_1041 (size=7280644) 2024-12-10T14:55:17,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741865_1041 (size=7280644) 2024-12-10T14:55:17,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741866_1042 (size=4188619) 2024-12-10T14:55:17,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741866_1042 (size=4188619) 2024-12-10T14:55:17,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741866_1042 (size=4188619) 2024-12-10T14:55:17,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741867_1043 (size=20406) 2024-12-10T14:55:17,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741867_1043 (size=20406) 2024-12-10T14:55:17,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741867_1043 (size=20406) 2024-12-10T14:55:17,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741868_1044 (size=75495) 2024-12-10T14:55:17,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741868_1044 (size=75495) 2024-12-10T14:55:17,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741868_1044 (size=75495) 2024-12-10T14:55:17,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741869_1045 (size=6350861) 2024-12-10T14:55:17,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741869_1045 (size=6350861) 2024-12-10T14:55:17,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741869_1045 (size=6350861) 2024-12-10T14:55:17,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741870_1046 (size=45609) 2024-12-10T14:55:17,475 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741870_1046 (size=45609) 2024-12-10T14:55:17,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741870_1046 (size=45609) 2024-12-10T14:55:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741871_1047 (size=110084) 2024-12-10T14:55:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741871_1047 (size=110084) 2024-12-10T14:55:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741871_1047 (size=110084) 2024-12-10T14:55:17,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741872_1048 (size=1323991) 2024-12-10T14:55:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741872_1048 (size=1323991) 2024-12-10T14:55:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741872_1048 (size=1323991) 2024-12-10T14:55:17,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741873_1049 (size=23076) 2024-12-10T14:55:17,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741873_1049 (size=23076) 2024-12-10T14:55:17,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741873_1049 (size=23076) 2024-12-10T14:55:17,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741874_1050 (size=126803) 2024-12-10T14:55:17,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741874_1050 (size=126803) 2024-12-10T14:55:17,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741874_1050 (size=126803) 2024-12-10T14:55:17,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741875_1051 (size=322274) 2024-12-10T14:55:17,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741875_1051 (size=322274) 2024-12-10T14:55:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741875_1051 (size=322274) 2024-12-10T14:55:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741876_1052 (size=451756) 2024-12-10T14:55:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741876_1052 (size=451756) 2024-12-10T14:55:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741876_1052 (size=451756) 2024-12-10T14:55:17,790 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741877_1053 (size=1832290) 2024-12-10T14:55:17,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741877_1053 (size=1832290) 2024-12-10T14:55:17,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741877_1053 (size=1832290) 2024-12-10T14:55:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741878_1054 (size=30081) 2024-12-10T14:55:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741878_1054 (size=30081) 2024-12-10T14:55:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741878_1054 (size=30081) 2024-12-10T14:55:17,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741879_1055 (size=53616) 2024-12-10T14:55:17,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741879_1055 (size=53616) 2024-12-10T14:55:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741879_1055 (size=53616) 2024-12-10T14:55:17,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741880_1056 (size=29229) 2024-12-10T14:55:17,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741880_1056 (size=29229) 2024-12-10T14:55:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741880_1056 (size=29229) 2024-12-10T14:55:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741881_1057 (size=169089) 2024-12-10T14:55:17,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741881_1057 (size=169089) 2024-12-10T14:55:17,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741881_1057 (size=169089) 2024-12-10T14:55:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741882_1058 (size=5175431) 2024-12-10T14:55:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741882_1058 (size=5175431) 2024-12-10T14:55:17,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741882_1058 (size=5175431) 2024-12-10T14:55:17,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741883_1059 (size=136454) 2024-12-10T14:55:17,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741883_1059 (size=136454) 
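[editor's note] The long run of "For class X, using jar Y" lines earlier in this section comes from TableMapReduceUtil resolving the jar that ships each class so it can be added to the MapReduce job's classpath; the addStoredBlock lines that follow are those jars being staged into HDFS. A rough sketch of the usual client pattern, assuming a generic Job is being prepared (the job name below is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-job");  // hypothetical job name
        // Resolves the containing jar for each class the job needs (HBase modules,
        // ZooKeeper, protobuf, metrics, ...) and adds it to the job's distributed cache.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }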
2024-12-10T14:55:17,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741883_1059 (size=136454) 2024-12-10T14:55:17,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741884_1060 (size=907858) 2024-12-10T14:55:17,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741884_1060 (size=907858) 2024-12-10T14:55:17,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741884_1060 (size=907858) 2024-12-10T14:55:17,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741885_1061 (size=3317408) 2024-12-10T14:55:17,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741885_1061 (size=3317408) 2024-12-10T14:55:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741885_1061 (size=3317408) 2024-12-10T14:55:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741886_1062 (size=503880) 2024-12-10T14:55:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741886_1062 (size=503880) 2024-12-10T14:55:17,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741886_1062 (size=503880) 2024-12-10T14:55:18,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741887_1063 (size=4695811) 2024-12-10T14:55:18,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741887_1063 (size=4695811) 2024-12-10T14:55:18,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741887_1063 (size=4695811) 2024-12-10T14:55:18,443 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
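[editor's note] The export itself (the inputRoot/outputRoot entries above, then the hfile-list load and the MapReduce copy that follows) corresponds to running the ExportSnapshot tool. A hedged sketch of driving it through ToolRunner is given below; the flag set shown (--snapshot, --copy-to, --target) is consistent with the destination paths in this log, but the exact arguments the test passes are not reproduced here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportWithTargetName",
            // Destination root; this run used an export-test directory under its HDFS root.
            "--copy-to", "hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982",
            // Renames the snapshot at the destination, hence ".hbase-snapshot/testExportWithTargetName".
            "--target", "testExportWithTargetName"
        });
        System.exit(rc);
      }
    }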
2024-12-10T14:55:18,454 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-10T14:55:18,464 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:55:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741888_1064 (size=342) 2024-12-10T14:55:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741888_1064 (size=342) 2024-12-10T14:55:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741888_1064 (size=342) 2024-12-10T14:55:18,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741889_1065 (size=15) 2024-12-10T14:55:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741889_1065 (size=15) 2024-12-10T14:55:18,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741889_1065 (size=15) 2024-12-10T14:55:18,695 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:55:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741890_1066 (size=304888) 2024-12-10T14:55:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741890_1066 (size=304888) 2024-12-10T14:55:18,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741890_1066 (size=304888) 2024-12-10T14:55:19,232 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:55:19,233 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:55:19,401 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0001_000001 (auth:SIMPLE) from 127.0.0.1:56348 2024-12-10T14:55:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-10T14:55:22,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:27,187 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0001_000001 (auth:SIMPLE) from 127.0.0.1:55502 2024-12-10T14:55:27,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741891_1067 (size=350562) 2024-12-10T14:55:27,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741891_1067 (size=350562) 2024-12-10T14:55:27,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741891_1067 (size=350562) 2024-12-10T14:55:29,542 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0001_000001 (auth:SIMPLE) from 127.0.0.1:40884 2024-12-10T14:55:30,923 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-10T14:55:33,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741892_1068 (size=8256) 2024-12-10T14:55:33,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741892_1068 (size=8256) 2024-12-10T14:55:33,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741892_1068 (size=8256) 2024-12-10T14:55:33,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741893_1069 (size=5354) 2024-12-10T14:55:33,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741893_1069 (size=5354) 2024-12-10T14:55:33,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741893_1069 (size=5354) 2024-12-10T14:55:33,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741894_1070 (size=17419) 2024-12-10T14:55:33,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741894_1070 (size=17419) 2024-12-10T14:55:33,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741894_1070 (size=17419) 2024-12-10T14:55:33,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741895_1071 (size=464) 2024-12-10T14:55:33,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741895_1071 (size=464) 2024-12-10T14:55:33,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741895_1071 (size=464) 2024-12-10T14:55:33,887 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000002/launch_container.sh] 2024-12-10T14:55:33,887 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000002/container_tokens] 2024-12-10T14:55:33,887 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000002/sysfs] 2024-12-10T14:55:33,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741896_1072 (size=17419) 2024-12-10T14:55:33,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741896_1072 (size=17419) 2024-12-10T14:55:33,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741896_1072 (size=17419) 2024-12-10T14:55:33,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741897_1073 (size=350562) 2024-12-10T14:55:33,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741897_1073 (size=350562) 2024-12-10T14:55:33,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741897_1073 (size=350562) 2024-12-10T14:55:33,955 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0001_000001 (auth:SIMPLE) from 127.0.0.1:53508 2024-12-10T14:55:35,607 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:55:35,608 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
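[editor's note] After "Export Completed", the test lists .snapshotinfo and data.manifest under both the source snapshot directory and the exported one (next entries). A small sketch of that kind of existence check using the plain Hadoop FileSystem API, assuming the paths from this run; the class name is made up and this is not the test's own verification code.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class VerifyExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36449"), conf);
        Path exported = new Path("/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/"
            + "export-test/export-1733842514982/.hbase-snapshot/testExportWithTargetName");
        // The snapshot descriptor and manifest must both be present at the destination.
        System.out.println(".snapshotinfo exists: " + fs.exists(new Path(exported, ".snapshotinfo")));
        System.out.println("data.manifest exists: " + fs.exists(new Path(exported, "data.manifest")));
      }
    }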
2024-12-10T14:55:35,614 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:35,615 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:35,617 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-10T14:55:35,617 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:55:35,618 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:55:35,618 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-10T14:55:35,619 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-10T14:55:35,619 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-10T14:55:35,619 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/testExportWithTargetName 2024-12-10T14:55:35,620 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-10T14:55:35,620 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842514982/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-10T14:55:35,630 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-10T14:55:35,634 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-10T14:55:35,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-10T14:55:35,648 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842535647"}]},"ts":"1733842535647"} 2024-12-10T14:55:35,652 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-10T14:55:35,655 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-10T14:55:35,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-10T14:55:35,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, UNASSIGN}] 2024-12-10T14:55:35,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, UNASSIGN 2024-12-10T14:55:35,665 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, UNASSIGN 2024-12-10T14:55:35,666 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=e2dc0ecfbe8bd1c496953eed3786cf3c, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:35,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=5ecadaf55e885f390398f6ef1e12d757, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:35,668 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:35,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE; CloseRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:35,671 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:35,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE; CloseRegionProcedure 5ecadaf55e885f390398f6ef1e12d757, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-10T14:55:35,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:35,824 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:35,825 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] 
handler.UnassignRegionHandler(124): Close e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:35,826 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:35,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing e2dc0ecfbe8bd1c496953eed3786cf3c, disabling compactions & flushes 2024-12-10T14:55:35,827 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:35,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:35,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. after waiting 0 ms 2024-12-10T14:55:35,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:35,828 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:35,828 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:35,828 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 5ecadaf55e885f390398f6ef1e12d757, disabling compactions & flushes 2024-12-10T14:55:35,828 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:35,828 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 2024-12-10T14:55:35,829 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. after waiting 0 ms 2024-12-10T14:55:35,829 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 
2024-12-10T14:55:35,839 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:55:35,845 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:35,846 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c. 2024-12-10T14:55:35,846 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for e2dc0ecfbe8bd1c496953eed3786cf3c: 2024-12-10T14:55:35,846 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:55:35,847 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:35,847 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757. 
2024-12-10T14:55:35,847 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 5ecadaf55e885f390398f6ef1e12d757: 2024-12-10T14:55:35,849 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:35,850 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=e2dc0ecfbe8bd1c496953eed3786cf3c, regionState=CLOSED 2024-12-10T14:55:35,851 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:35,852 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=5ecadaf55e885f390398f6ef1e12d757, regionState=CLOSED 2024-12-10T14:55:35,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=25 2024-12-10T14:55:35,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=25, state=SUCCESS; CloseRegionProcedure e2dc0ecfbe8bd1c496953eed3786cf3c, server=db1d50717577,37707,1733842502956 in 188 msec 2024-12-10T14:55:35,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=26 2024-12-10T14:55:35,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=26, state=SUCCESS; CloseRegionProcedure 5ecadaf55e885f390398f6ef1e12d757, server=db1d50717577,45255,1733842503015 in 186 msec 2024-12-10T14:55:35,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e2dc0ecfbe8bd1c496953eed3786cf3c, UNASSIGN in 197 msec 2024-12-10T14:55:35,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=24 2024-12-10T14:55:35,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5ecadaf55e885f390398f6ef1e12d757, UNASSIGN in 199 msec 2024-12-10T14:55:35,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-10T14:55:35,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 209 msec 2024-12-10T14:55:35,871 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842535871"}]},"ts":"1733842535871"} 2024-12-10T14:55:35,873 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-10T14:55:35,875 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-10T14:55:35,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 240 msec 2024-12-10T14:55:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-10T14:55:35,948 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-10T14:55:35,952 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-10T14:55:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:35,968 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-10T14:55:35,971 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:35,976 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-10T14:55:35,982 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:35,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-10T14:55:35,988 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-10T14:55:35,988 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-10T14:55:35,988 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 
2024-12-10T14:55:35,989 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/recovered.edits] 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:35,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:35,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T14:55:36,008 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:36,020 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/recovered.edits] 2024-12-10T14:55:36,028 DEBUG [HFileArchiver-3 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/cf/5ac41715479e45c2a5c261a68b015e05 2024-12-10T14:55:36,045 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/cf/e167e54369bb48a6863ed67fa7581e23 2024-12-10T14:55:36,048 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c/recovered.edits/9.seqid 2024-12-10T14:55:36,049 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/e2dc0ecfbe8bd1c496953eed3786cf3c 2024-12-10T14:55:36,058 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757/recovered.edits/9.seqid 2024-12-10T14:55:36,059 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithTargetName/5ecadaf55e885f390398f6ef1e12d757 2024-12-10T14:55:36,059 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-10T14:55:36,072 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:36,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-10T14:55:36,087 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-10T14:55:36,093 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 
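Editor's note: the HFileArchiver lines above follow one pattern throughout: each store file and recovered.edits file is moved from data/default/&lt;table&gt;/&lt;region&gt;/... to the mirrored path under archive/, and the emptied region directory is then deleted. The sketch below reproduces that archive-then-delete pattern using only the public Hadoop FileSystem API; it is not HBase's HFileArchiver, the class name is made up for illustration, and it assumes the region directory sits under the given root and contains only subdirectories (cf/, recovered.edits/):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveRegionSketch {

    // Move a region directory's files under <rootDir>/archive/, mirroring the
    // data/... layout, then delete the emptied source directory.
    public static void archiveRegion(FileSystem fs, Path rootDir, Path regionDir) throws Exception {
        // Assumes regionDir lives under rootDir, e.g. <rootDir>/data/default/<table>/<region>.
        String relative = rootDir.toUri().relativize(regionDir.toUri()).getPath();
        Path archiveRegionDir = new Path(new Path(rootDir, "archive"), relative);

        for (FileStatus child : fs.listStatus(regionDir)) {          // cf/, recovered.edits/, ...
            Path archiveChild = new Path(archiveRegionDir, child.getPath().getName());
            fs.mkdirs(archiveChild);
            for (FileStatus file : fs.listStatus(child.getPath())) {
                // Corresponds to the "Archived from ... to ..." lines above.
                fs.rename(file.getPath(), new Path(archiveChild, file.getPath().getName()));
            }
        }
        // Corresponds to the "Deleted hdfs://.../<region>" lines above.
        fs.delete(regionDir, true);
    }

    public static void main(String[] args) throws Exception {
        // Illustrative usage: ArchiveRegionSketch <rootDir> <regionDir>
        FileSystem fs = FileSystem.get(new Configuration());
        archiveRegion(fs, new Path(args[0]), new Path(args[1]));
    }
}
```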
2024-12-10T14:55:36,096 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:36,096 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-10T14:55:36,096 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842536096"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:36,096 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842536096"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:36,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T14:55:36,102 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:55:36,102 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e2dc0ecfbe8bd1c496953eed3786cf3c, NAME => 'testtb-testExportWithTargetName,,1733842512739.e2dc0ecfbe8bd1c496953eed3786cf3c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5ecadaf55e885f390398f6ef1e12d757, NAME => 'testtb-testExportWithTargetName,1,1733842512739.5ecadaf55e885f390398f6ef1e12d757.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:55:36,102 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 
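Editor's note: taken together, the entries from pid=23 through pid=29 are the server-side trace of a plain disable-then-delete issued by the test client, followed just below by deletion of the two snapshots. A minimal client-side sketch of that flow with the standard HBase Admin API, reusing the table and snapshot names from the log, might look like the following; the test itself drives this through its own utility wrappers, so treat it as an approximation rather than the test's actual code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testExportWithTargetName");

            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // server side: DisableTableProcedure (pid=23 above)
                }
                admin.deleteTable(table);        // server side: DeleteTableProcedure (pid=29 above)
            }

            // Snapshot cleanup, matching the "Deleting snapshot: ..." entries that follow.
            admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
            admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
    }
}
```

The synchronous disableTable/deleteTable calls block until the corresponding master procedure completes, which is what the repeated "Checking to see if procedure is done pid=..." polling lines in the log reflect.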
2024-12-10T14:55:36,103 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842536102"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:36,105 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-10T14:55:36,108 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-10T14:55:36,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 155 msec 2024-12-10T14:55:36,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T14:55:36,299 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-10T14:55:36,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-10T14:55:36,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-10T14:55:36,321 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-10T14:55:36,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-10T14:55:36,362 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=778 (was 726) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:51646 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1300 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:59284 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_210965775_1 at /127.0.0.1:51628 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) 
app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:33579 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_210965775_1 at /127.0.0.1:34886 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:38701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_CLOSE_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33579 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 22661) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:34904 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 785) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=405 (was 237) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=2502 (was 3437) 2024-12-10T14:55:36,363 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-12-10T14:55:36,382 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=778, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=405, ProcessCount=17, AvailableMemoryMB=2501 2024-12-10T14:55:36,382 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-12-10T14:55:36,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:55:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:36,387 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:36,387 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:36,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-10T14:55:36,388 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:55:36,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741898_1074 (size=404) 2024-12-10T14:55:36,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741898_1074 (size=404) 2024-12-10T14:55:36,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741898_1074 (size=404) 2024-12-10T14:55:36,401 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4a1aefbc6d60395d01fda31124f1765c, NAME => 'testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:36,401 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 136851938ae5fccaa2ca171abc942d0e, NAME => 'testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741899_1075 (size=65) 2024-12-10T14:55:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741899_1075 (size=65) 2024-12-10T14:55:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741899_1075 (size=65) 2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 136851938ae5fccaa2ca171abc942d0e, disabling compactions & flushes 2024-12-10T14:55:36,414 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. after waiting 0 ms 2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,414 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 
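The create request and the two "creating {ENCODED => ...}" entries above spell out the table layout: a single column family 'cf' with one version, and region boundaries at row key '1', so the table is pre-split into two regions. A minimal client-side sketch of an equivalent create, assuming the stock HBase 2.x Java client (the class name and connection setup are illustrative, not taken from the test code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    // Connection details come from hbase-site.xml on the classpath; nothing
    // cluster-specific from this log is reused here.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportWithResetTtl");
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)                       // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))           // NAME => 'cf'
              .setMaxVersions(1)                         // VERSIONS => '1'
              .setBlocksize(65536)                       // BLOCKSIZE => '65536'
              .build());
      // The two regions in the log ([ '', '1') and [ '1', '')) imply a single
      // pre-split at row key '1'.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}

Passing the split key up front is what produces the two RegionOpenAndInit pool workers seen above instead of a single-region table.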
2024-12-10T14:55:36,414 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 136851938ae5fccaa2ca171abc942d0e: 2024-12-10T14:55:36,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741900_1076 (size=65) 2024-12-10T14:55:36,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741900_1076 (size=65) 2024-12-10T14:55:36,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741900_1076 (size=65) 2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 4a1aefbc6d60395d01fda31124f1765c, disabling compactions & flushes 2024-12-10T14:55:36,421 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. after waiting 0 ms 2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:36,421 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
2024-12-10T14:55:36,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4a1aefbc6d60395d01fda31124f1765c: 2024-12-10T14:55:36,423 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:36,423 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842536423"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842536423"}]},"ts":"1733842536423"} 2024-12-10T14:55:36,423 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842536423"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842536423"}]},"ts":"1733842536423"} 2024-12-10T14:55:36,427 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:55:36,428 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:36,428 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842536428"}]},"ts":"1733842536428"} 2024-12-10T14:55:36,430 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-10T14:55:36,433 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:36,434 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:36,434 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:36,434 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:36,434 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:36,434 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:36,434 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:36,434 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:36,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, ASSIGN}] 2024-12-10T14:55:36,436 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=136851938ae5fccaa2ca171abc942d0e, ASSIGN 2024-12-10T14:55:36,436 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, ASSIGN 2024-12-10T14:55:36,437 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:55:36,437 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:55:36,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:55:36,588 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:55:36,588 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4a1aefbc6d60395d01fda31124f1765c, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:36,588 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=136851938ae5fccaa2ca171abc942d0e, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:36,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure 4a1aefbc6d60395d01fda31124f1765c, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:55:36,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure 136851938ae5fccaa2ca171abc942d0e, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:36,618 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:36,619 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:36,620 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:55:36,621 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:55:36,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:55:36,744 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:36,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
db1d50717577,41775,1733842502887 2024-12-10T14:55:36,749 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,749 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:36,749 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 136851938ae5fccaa2ca171abc942d0e, NAME => 'testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:55:36,749 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 4a1aefbc6d60395d01fda31124f1765c, NAME => 'testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:55:36,749 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. service=AccessControlService 2024-12-10T14:55:36,749 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. service=AccessControlService 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:36,750 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
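Both region opens register the AccessControlService coprocessor endpoint before their stores come up, which is what makes the per-table ACL written later enforceable. The log does not show how the coprocessor was wired in; the sketch below lists the standard configuration keys normally used for that, purely as an assumed illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Standard HBase keys; whether this test sets exactly these values is an
    // assumption, the log only shows the coprocessor being loaded.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    System.out.println(conf.get("hbase.coprocessor.region.classes"));
  }
}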
2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,750 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,752 INFO [StoreOpener-4a1aefbc6d60395d01fda31124f1765c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,752 INFO [StoreOpener-136851938ae5fccaa2ca171abc942d0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,753 INFO [StoreOpener-136851938ae5fccaa2ca171abc942d0e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 136851938ae5fccaa2ca171abc942d0e columnFamilyName cf 2024-12-10T14:55:36,753 DEBUG [StoreOpener-136851938ae5fccaa2ca171abc942d0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:36,754 INFO [StoreOpener-4a1aefbc6d60395d01fda31124f1765c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4a1aefbc6d60395d01fda31124f1765c columnFamilyName cf 2024-12-10T14:55:36,754 INFO [StoreOpener-136851938ae5fccaa2ca171abc942d0e-1 {}] regionserver.HStore(327): Store=136851938ae5fccaa2ca171abc942d0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:36,754 DEBUG [StoreOpener-4a1aefbc6d60395d01fda31124f1765c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:36,755 INFO [StoreOpener-4a1aefbc6d60395d01fda31124f1765c-1 {}] regionserver.HStore(327): Store=4a1aefbc6d60395d01fda31124f1765c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:36,755 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,755 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,756 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,756 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,758 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:36,760 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:36,765 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:36,765 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 136851938ae5fccaa2ca171abc942d0e; 
next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60092428, jitterRate=-0.10455304384231567}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:36,768 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 136851938ae5fccaa2ca171abc942d0e: 2024-12-10T14:55:36,769 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:36,769 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e., pid=34, masterSystemTime=1733842536744 2024-12-10T14:55:36,770 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 4a1aefbc6d60395d01fda31124f1765c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74973211, jitterRate=0.11718790233135223}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:36,770 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 4a1aefbc6d60395d01fda31124f1765c: 2024-12-10T14:55:36,772 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c., pid=33, masterSystemTime=1733842536744 2024-12-10T14:55:36,774 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=136851938ae5fccaa2ca171abc942d0e, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:36,773 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,775 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:36,775 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:36,775 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
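The two "Opened ..." entries also print the effective split policy, and the odd-looking desiredMaxFileSize values are simply a 64 MiB base file size with the logged jitterRate applied, i.e. base * (1 + jitterRate). The 64 MiB base is inferred from the numbers themselves rather than read from the test configuration; this short sketch reproduces both values:

public class SplitSizeJitterSketch {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024;             // 67108864 bytes, inferred base size
    double jitterA = -0.10455304384231567;     // logged for 136851938ae5fccaa...
    double jitterB = 0.11718790233135223;      // logged for 4a1aefbc6d60395d...
    // base * (1 + jitterRate) reproduces the logged desiredMaxFileSize values.
    System.out.println((long) (base + base * jitterA)); // ~60092428
    System.out.println((long) (base + base * jitterB)); // ~74973211
  }
}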
2024-12-10T14:55:36,776 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4a1aefbc6d60395d01fda31124f1765c, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:36,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-10T14:55:36,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure 136851938ae5fccaa2ca171abc942d0e, server=db1d50717577,45255,1733842503015 in 185 msec 2024-12-10T14:55:36,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-10T14:55:36,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure 4a1aefbc6d60395d01fda31124f1765c, server=db1d50717577,41775,1733842502887 in 188 msec 2024-12-10T14:55:36,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, ASSIGN in 345 msec 2024-12-10T14:55:36,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-10T14:55:36,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, ASSIGN in 346 msec 2024-12-10T14:55:36,785 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:36,786 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842536785"}]},"ts":"1733842536785"} 2024-12-10T14:55:36,788 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-10T14:55:36,791 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:36,792 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-10T14:55:36,795 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-10T14:55:36,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:36,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:36,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:36,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:36,800 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:36,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:36,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:36,802 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:36,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 417 msec 2024-12-10T14:55:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:55:36,994 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-10T14:55:36,994 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-10T14:55:36,994 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:36,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-10T14:55:36,999 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:36,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-10T14:55:37,004 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-10T14:55:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842537004 (current time:1733842537004). 
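The "jenkins: RWXCA" row is written by the AccessController's post-create hook, and the ZKPermissionWatcher entries show each server refreshing its permission cache from the /hbase/acl znode. Issued explicitly from a client instead of the hook, a grant of the same shape would look roughly like this sketch (assuming the standard AccessControlClient helper; not part of the test itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // R, W, X, C, A on the whole table for user 'jenkins' (family and
      // qualifier left null so the grant covers the table as a whole).
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithResetTtl"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE,
          Permission.Action.ADMIN);
    }
  }
}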
2024-12-10T14:55:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:55:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-10T14:55:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:55:37,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c0b7683 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@665d0e99 2024-12-10T14:55:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d053935, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,012 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c0b7683 to 127.0.0.1:57176 2024-12-10T14:55:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x286ab5b3 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26cd4fe1 2024-12-10T14:55:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1235169c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:37,029 DEBUG [hconnection-0x3ec6083f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,031 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,035 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x286ab5b3 to 127.0.0.1:57176 2024-12-10T14:55:37,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:37,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-10T14:55:37,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:55:37,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-10T14:55:37,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-10T14:55:37,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:55:37,046 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:55:37,048 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:55:37,057 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:55:37,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741901_1077 (size=161) 2024-12-10T14:55:37,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741901_1077 (size=161) 2024-12-10T14:55:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741901_1077 (size=161) 2024-12-10T14:55:37,098 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:55:37,099 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e}] 
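The SnapshotProcedure above steps through its states (SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS) for the FLUSH-type snapshot the client requested. From the client side the whole sequence is driven by a single admin call; a minimal sketch, assuming the HBase 2.x Admin API, with the snapshot and table names taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side snapshot procedure completes, which is
      // the same completion the log polls for with "Checking to see if
      // procedure is done".
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}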
2024-12-10T14:55:37,101 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,101 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:55:37,252 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:55:37,252 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:37,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-10T14:55:37,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 136851938ae5fccaa2ca171abc942d0e: 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 4a1aefbc6d60395d01fda31124f1765c: 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:55:37,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:55:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741903_1079 (size=68) 2024-12-10T14:55:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741903_1079 (size=68) 2024-12-10T14:55:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741903_1079 (size=68) 2024-12-10T14:55:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-10T14:55:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-10T14:55:37,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741902_1078 (size=68) 2024-12-10T14:55:37,266 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,267 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741902_1078 (size=68) 2024-12-10T14:55:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741902_1078 (size=68) 2024-12-10T14:55:37,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
2024-12-10T14:55:37,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-10T14:55:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-10T14:55:37,269 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,270 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e in 169 msec 2024-12-10T14:55:37,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-10T14:55:37,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c in 171 msec 2024-12-10T14:55:37,273 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:55:37,274 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:55:37,275 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:55:37,275 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:37,276 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741904_1080 (size=543) 2024-12-10T14:55:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741904_1080 (size=543) 2024-12-10T14:55:37,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741904_1080 (size=543) 2024-12-10T14:55:37,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ 
ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:55:37,331 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:55:37,332 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:37,335 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:55:37,335 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-10T14:55:37,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 296 msec 2024-12-10T14:55:37,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:55:37,348 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-10T14:55:37,366 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,369 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:55:37,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:55:37,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-10T14:55:37,377 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
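
The "writing data to region ... with WAL disabled" warnings above come from client puts issued with durability SKIP_WAL, which the test uses to load rows quickly before taking the next snapshot; as the log itself warns, such data cannot be recovered from the WAL after a crash. A sketch of such a put (the row key and value are illustrative; "cf"/"q" match the family and qualifier visible later in the flush entries):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));   // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);         // triggers the "WAL disabled" warning above
          table.put(put);
        }
      }
    }
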
2024-12-10T14:55:37,377 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:37,401 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-10T14:55:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842537402 (current time:1733842537402). 2024-12-10T14:55:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:55:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-10T14:55:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:55:37,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35397e2a to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b301f42 2024-12-10T14:55:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc37260, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:37,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,411 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35397e2a to 127.0.0.1:57176 2024-12-10T14:55:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:37,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41882816 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@183b74b9 2024-12-10T14:55:37,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2de31c9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:37,421 DEBUG [hconnection-0x6e3e3b26-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,422 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42734, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:37,425 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41882816 to 127.0.0.1:57176 2024-12-10T14:55:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:37,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-10T14:55:37,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:55:37,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-10T14:55:37,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-10T14:55:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-10T14:55:37,430 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:55:37,431 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:55:37,435 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:55:37,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741905_1081 (size=156) 2024-12-10T14:55:37,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741905_1081 (size=156) 2024-12-10T14:55:37,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741905_1081 (size=156) 2024-12-10T14:55:37,450 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:55:37,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e}] 2024-12-10T14:55:37,452 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,452 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-10T14:55:37,604 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:55:37,604 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:37,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-10T14:55:37,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-10T14:55:37,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:37,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
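
The entries above show the master storing SnapshotProcedure pid=38 for snaptb0-testExportWithResetTtl and fanning it out as one SnapshotRegionProcedure per region (pids 39 and 40) dispatched to the region servers. On the client side this whole flow is driven by a single Admin call; a minimal sketch using the names from the log (connection setup assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure reports completion.
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"));
        }
      }
    }
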
2024-12-10T14:55:37,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 136851938ae5fccaa2ca171abc942d0e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-10T14:55:37,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 4a1aefbc6d60395d01fda31124f1765c 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-10T14:55:37,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/.tmp/cf/36febbc65d504d67ad3cf5596d2e0aa6 is 71, key is 096eb409542ee6af60b575c32b82363c/cf:q/1733842537370/Put/seqid=0 2024-12-10T14:55:37,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/.tmp/cf/8530d71659db419cb3cf1b4e0d51ebea is 71, key is 160ae66a708e67be0db1b14a224a0cba/cf:q/1733842537369/Put/seqid=0 2024-12-10T14:55:37,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741906_1082 (size=5286) 2024-12-10T14:55:37,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741906_1082 (size=5286) 2024-12-10T14:55:37,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741906_1082 (size=5286) 2024-12-10T14:55:37,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/.tmp/cf/36febbc65d504d67ad3cf5596d2e0aa6 2024-12-10T14:55:37,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741907_1083 (size=8324) 2024-12-10T14:55:37,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741907_1083 (size=8324) 2024-12-10T14:55:37,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741907_1083 (size=8324) 2024-12-10T14:55:37,644 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/.tmp/cf/8530d71659db419cb3cf1b4e0d51ebea 2024-12-10T14:55:37,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/.tmp/cf/8530d71659db419cb3cf1b4e0d51ebea as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea 2024-12-10T14:55:37,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea, entries=47, sequenceid=6, filesize=8.1 K 2024-12-10T14:55:37,660 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 136851938ae5fccaa2ca171abc942d0e in 55ms, sequenceid=6, compaction requested=false 2024-12-10T14:55:37,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-10T14:55:37,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 136851938ae5fccaa2ca171abc942d0e: 2024-12-10T14:55:37,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. for snaptb0-testExportWithResetTtl completed. 2024-12-10T14:55:37,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:37,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea] hfiles 2024-12-10T14:55:37,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea for snapshot=snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/.tmp/cf/36febbc65d504d67ad3cf5596d2e0aa6 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6 2024-12-10T14:55:37,671 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6, entries=3, sequenceid=6, filesize=5.2 K 2024-12-10T14:55:37,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 4a1aefbc6d60395d01fda31124f1765c in 67ms, sequenceid=6, compaction requested=false 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 4a1aefbc6d60395d01fda31124f1765c: 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. for snaptb0-testExportWithResetTtl completed. 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6] hfiles 2024-12-10T14:55:37,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6 for snapshot=snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741908_1084 (size=107) 2024-12-10T14:55:37,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741908_1084 (size=107) 2024-12-10T14:55:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741908_1084 (size=107) 2024-12-10T14:55:37,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 
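
This is the FLUSH-type snapshot doing its work: each region first flushes its memstore to a new hfile (8.1 K and 5.2 K above), and SnapshotManifest then records one reference per hfile rather than copying any data. The snapshot path flushes automatically, but an equivalent explicit flush can also be requested from a client when needed; a small sketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush every region of the table so memstore contents land in hfiles.
          admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
        }
      }
    }
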
2024-12-10T14:55:37,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-10T14:55:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-10T14:55:37,684 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,684 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:37,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 136851938ae5fccaa2ca171abc942d0e in 237 msec 2024-12-10T14:55:37,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741909_1085 (size=107) 2024-12-10T14:55:37,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741909_1085 (size=107) 2024-12-10T14:55:37,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741909_1085 (size=107) 2024-12-10T14:55:37,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:37,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-10T14:55:37,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-10T14:55:37,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,698 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:37,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-10T14:55:37,701 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:55:37,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 4a1aefbc6d60395d01fda31124f1765c in 250 msec 2024-12-10T14:55:37,702 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:55:37,703 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:55:37,703 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,704 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741910_1086 (size=621) 2024-12-10T14:55:37,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741910_1086 (size=621) 2024-12-10T14:55:37,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741910_1086 (size=621) 2024-12-10T14:55:37,722 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:55:37,728 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:55:37,729 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-10T14:55:37,730 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:55:37,730 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-10T14:55:37,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 303 msec 2024-12-10T14:55:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-10T14:55:37,733 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table 
Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-10T14:55:37,734 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:55:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:37,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:37,736 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:37,736 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-10T14:55:37,737 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:37,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T14:55:37,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741911_1087 (size=397) 2024-12-10T14:55:37,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741911_1087 (size=397) 2024-12-10T14:55:37,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741911_1087 (size=397) 2024-12-10T14:55:37,749 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8f098689ed91c97341e38a2890fcaa08, NAME => 'testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:37,749 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6d19b980654796af47ae33f9a1b762f2, NAME => 
'testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:37,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741912_1088 (size=58) 2024-12-10T14:55:37,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741912_1088 (size=58) 2024-12-10T14:55:37,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741913_1089 (size=58) 2024-12-10T14:55:37,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741912_1088 (size=58) 2024-12-10T14:55:37,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741913_1089 (size=58) 2024-12-10T14:55:37,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741913_1089 (size=58) 2024-12-10T14:55:37,769 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 6d19b980654796af47ae33f9a1b762f2, disabling compactions & flushes 2024-12-10T14:55:37,770 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. after waiting 0 ms 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:37,770 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 
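
The CreateTableProcedure above builds 'testExportWithResetTtl' with a single 'cf' family (VERSIONS=1, ROW bloom filter, 64 KB block size) and a pre-split at key '1', which is why two regions with start keys '' and '1' are initialized. A client-side sketch that would request an equivalent table (family settings mirror the descriptor in the log; everything else is assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(cf)
              .build();
          // One split key => two regions: ['', '1') and ['1', '').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
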
2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6d19b980654796af47ae33f9a1b762f2: 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 8f098689ed91c97341e38a2890fcaa08, disabling compactions & flushes 2024-12-10T14:55:37,770 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. after waiting 0 ms 2024-12-10T14:55:37,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:37,771 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:37,771 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8f098689ed91c97341e38a2890fcaa08: 2024-12-10T14:55:37,772 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:37,773 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733842537772"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842537772"}]},"ts":"1733842537772"} 2024-12-10T14:55:37,773 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733842537772"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842537772"}]},"ts":"1733842537772"} 2024-12-10T14:55:37,775 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
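
With the two regions now added to hbase:meta, their boundaries can be inspected from any client; a quick sketch using the standard Admin.getRegions call (output formatting is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Prints encoded name plus start/end key for each region in meta.
          for (RegionInfo ri : admin.getRegions(TableName.valueOf("testExportWithResetTtl"))) {
            System.out.println(ri.getEncodedName()
                + " start=" + Bytes.toStringBinary(ri.getStartKey())
                + " end=" + Bytes.toStringBinary(ri.getEndKey()));
          }
        }
      }
    }
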
2024-12-10T14:55:37,779 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:37,779 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842537779"}]},"ts":"1733842537779"} 2024-12-10T14:55:37,782 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-10T14:55:37,788 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:37,790 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:37,790 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:37,790 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:37,790 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:37,790 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:37,790 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:37,790 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:37,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, ASSIGN}] 2024-12-10T14:55:37,791 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, ASSIGN 2024-12-10T14:55:37,791 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, ASSIGN 2024-12-10T14:55:37,793 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:55:37,793 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:55:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T14:55:37,943 INFO 
[db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:55:37,943 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=6d19b980654796af47ae33f9a1b762f2, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:37,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8f098689ed91c97341e38a2890fcaa08, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:37,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; OpenRegionProcedure 6d19b980654796af47ae33f9a1b762f2, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:37,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=42, state=RUNNABLE; OpenRegionProcedure 8f098689ed91c97341e38a2890fcaa08, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:38,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T14:55:38,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:38,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:38,103 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:38,104 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 8f098689ed91c97341e38a2890fcaa08, NAME => 'testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:55:38,104 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:38,104 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 6d19b980654796af47ae33f9a1b762f2, NAME => 'testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:55:38,104 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. service=AccessControlService 2024-12-10T14:55:38,104 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. service=AccessControlService 2024-12-10T14:55:38,104 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
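
The assignment entries above show the balancer picking servers and the OpenRegionProcedure subprocedures (pids 44 and 45) opening the two regions on db1d50717577,37707 and db1d50717577,45255. Once assignment completes, a client can confirm where each region landed through a RegionLocator; a sketch (server names will naturally differ outside this test cluster):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationsSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Each location pairs a region with the region server currently hosting it.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
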
2024-12-10T14:55:38,104 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,105 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,106 INFO [StoreOpener-8f098689ed91c97341e38a2890fcaa08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,106 INFO [StoreOpener-6d19b980654796af47ae33f9a1b762f2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,108 INFO [StoreOpener-6d19b980654796af47ae33f9a1b762f2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d19b980654796af47ae33f9a1b762f2 columnFamilyName cf 2024-12-10T14:55:38,108 INFO [StoreOpener-8f098689ed91c97341e38a2890fcaa08-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f098689ed91c97341e38a2890fcaa08 columnFamilyName cf 2024-12-10T14:55:38,108 DEBUG [StoreOpener-6d19b980654796af47ae33f9a1b762f2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:38,108 DEBUG [StoreOpener-8f098689ed91c97341e38a2890fcaa08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:38,109 INFO [StoreOpener-6d19b980654796af47ae33f9a1b762f2-1 {}] regionserver.HStore(327): Store=6d19b980654796af47ae33f9a1b762f2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:38,109 INFO [StoreOpener-8f098689ed91c97341e38a2890fcaa08-1 {}] regionserver.HStore(327): Store=8f098689ed91c97341e38a2890fcaa08/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:38,110 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,110 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,110 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,110 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,112 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,113 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,126 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:38,126 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:38,127 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 6d19b980654796af47ae33f9a1b762f2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61096484, jitterRate=-0.08959144353866577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:38,127 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 8f098689ed91c97341e38a2890fcaa08; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69469028, jitterRate=0.03516918420791626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:38,128 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 8f098689ed91c97341e38a2890fcaa08: 2024-12-10T14:55:38,128 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 6d19b980654796af47ae33f9a1b762f2: 2024-12-10T14:55:38,129 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2., pid=44, masterSystemTime=1733842538099 2024-12-10T14:55:38,129 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08., pid=45, masterSystemTime=1733842538100 2024-12-10T14:55:38,130 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:38,131 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 
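
The store opener lines above show 'hbase.store.file-tracker.impl' resolving to DefaultStoreFileTracker and the default stepping split policy being applied as each region opens. The tracker choice is carried as table metadata (METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'} in the create-table entry earlier); a short sketch of setting that property explicitly on a descriptor, using the DEFAULT value seen in this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerSketch {
      public static void main(String[] args) {
        // Mirrors the METADATA entry visible in the create-table log line above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .build();
        System.out.println(td.getValue("hbase.store.file-tracker.impl"));
      }
    }
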
2024-12-10T14:55:38,131 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8f098689ed91c97341e38a2890fcaa08, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:38,131 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:38,131 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:38,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=6d19b980654796af47ae33f9a1b762f2, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:38,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=42 2024-12-10T14:55:38,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=42, state=SUCCESS; OpenRegionProcedure 8f098689ed91c97341e38a2890fcaa08, server=db1d50717577,45255,1733842503015 in 186 msec 2024-12-10T14:55:38,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-10T14:55:38,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; OpenRegionProcedure 6d19b980654796af47ae33f9a1b762f2, server=db1d50717577,37707,1733842502956 in 189 msec 2024-12-10T14:55:38,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, ASSIGN in 345 msec 2024-12-10T14:55:38,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-10T14:55:38,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, ASSIGN in 346 msec 2024-12-10T14:55:38,139 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:38,139 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842538139"}]},"ts":"1733842538139"} 2024-12-10T14:55:38,141 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-10T14:55:38,144 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:38,144 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-10T14:55:38,147 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-10T14:55:38,149 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:38,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:38,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:38,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:38,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,152 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,153 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 417 msec 2024-12-10T14:55:38,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:38,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=41 2024-12-10T14:55:38,341 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-10T14:55:38,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-10T14:55:38,341 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:38,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-10T14:55:38,345 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:38,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-10T14:55:38,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:55:38,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:55:38,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-10T14:55:38,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:38,361 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:55:38,375 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-10T14:55:38,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842538375 (current time:1733842538375). 
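[editor's note] The snapshot request logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) arrives through the Admin API. A minimal client-side sketch follows; it is an assumption-laden illustration rather than the test's code. The two-argument Admin.snapshot call takes a FLUSH-type snapshot of an enabled table, which matches type=FLUSH here; the ttl=100000 the test attaches to the snapshot description is omitted from the sketch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure completes; while it runs,
          // the master logs "Checking to see if procedure is done" as seen below for pid=46.
          admin.snapshot("snaptb-testExportWithResetTtl",
                         TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }

The call returns only after the SnapshotProcedure (pid=46 in the following entries) reaches SUCCESS, which is why the client keeps polling the master in the meantime.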
2024-12-10T14:55:38,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-10T14:55:38,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:55:38,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51439dc7 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f875016 2024-12-10T14:55:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7362a59a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:38,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:38,382 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51439dc7 to 127.0.0.1:57176 2024-12-10T14:55:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:38,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a39fcff to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f7b064e 2024-12-10T14:55:38,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4953284e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:55:38,391 DEBUG [hconnection-0x2161ab52-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:38,392 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:38,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:55:38,397 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:55:38,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a39fcff to 127.0.0.1:57176 2024-12-10T14:55:38,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:55:38,398 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-10T14:55:38,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:55:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-10T14:55:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-10T14:55:38,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-10T14:55:38,403 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:55:38,407 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:55:38,410 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:55:38,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741914_1090 (size=143) 2024-12-10T14:55:38,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741914_1090 (size=143) 2024-12-10T14:55:38,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741914_1090 (size=143) 2024-12-10T14:55:38,441 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:55:38,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8f098689ed91c97341e38a2890fcaa08}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6d19b980654796af47ae33f9a1b762f2}] 2024-12-10T14:55:38,443 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,443 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-10T14:55:38,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:38,594 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:38,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-10T14:55:38,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-10T14:55:38,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:38,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:38,595 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 8f098689ed91c97341e38a2890fcaa08 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-10T14:55:38,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 6d19b980654796af47ae33f9a1b762f2 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-10T14:55:38,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/.tmp/cf/ae616ff09e0b4c17940e36de6d2b36c3 is 71, key is 0661d7924cb2b08bd5f774e93772cf21/cf:q/1733842538354/Put/seqid=0 2024-12-10T14:55:38,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741915_1091 (size=5424) 2024-12-10T14:55:38,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741915_1091 (size=5424) 2024-12-10T14:55:38,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741915_1091 (size=5424) 2024-12-10T14:55:38,621 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/.tmp/cf/ae616ff09e0b4c17940e36de6d2b36c3 2024-12-10T14:55:38,622 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/.tmp/cf/cf2defb6361041379e8848bf00ad8f57 is 71, key is 151df2023f82560c386d042b673fd5bf/cf:q/1733842538356/Put/seqid=0 2024-12-10T14:55:38,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/.tmp/cf/ae616ff09e0b4c17940e36de6d2b36c3 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3 2024-12-10T14:55:38,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3, entries=5, sequenceid=5, filesize=5.3 K 2024-12-10T14:55:38,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 8f098689ed91c97341e38a2890fcaa08 in 40ms, sequenceid=5, compaction requested=false 2024-12-10T14:55:38,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-10T14:55:38,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 8f098689ed91c97341e38a2890fcaa08: 2024-12-10T14:55:38,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. for snaptb-testExportWithResetTtl completed. 2024-12-10T14:55:38,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-10T14:55:38,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:38,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3] hfiles 2024-12-10T14:55:38,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3 for snapshot=snaptb-testExportWithResetTtl 2024-12-10T14:55:38,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741916_1092 (size=8188) 2024-12-10T14:55:38,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741916_1092 (size=8188) 2024-12-10T14:55:38,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741916_1092 (size=8188) 2024-12-10T14:55:38,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/.tmp/cf/cf2defb6361041379e8848bf00ad8f57 2024-12-10T14:55:38,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741917_1093 (size=100) 2024-12-10T14:55:38,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741917_1093 (size=100) 2024-12-10T14:55:38,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741917_1093 (size=100) 2024-12-10T14:55:38,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/.tmp/cf/cf2defb6361041379e8848bf00ad8f57 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57 2024-12-10T14:55:38,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 
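[editor's note] Because this is a FLUSH-type snapshot, each SnapshotRegionCallable first flushes the region's memstore to an hfile (the DefaultStoreFlusher lines above) and only then records references to the resulting files in the snapshot manifest. That flush is the same operation a client can trigger explicitly; a minimal sketch, assuming the same table and a standard client connection, is:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Force every region of the table to write its memstore out as an hfile,
          // the same memstore-to-hfile path the snapshot's per-region flush uses.
          admin.flush(TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }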
2024-12-10T14:55:38,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-10T14:55:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-10T14:55:38,649 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,650 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:38,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 8f098689ed91c97341e38a2890fcaa08 in 210 msec 2024-12-10T14:55:38,656 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57, entries=45, sequenceid=5, filesize=8.0 K 2024-12-10T14:55:38,657 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 6d19b980654796af47ae33f9a1b762f2 in 62ms, sequenceid=5, compaction requested=false 2024-12-10T14:55:38,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 6d19b980654796af47ae33f9a1b762f2: 2024-12-10T14:55:38,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. for snaptb-testExportWithResetTtl completed. 2024-12-10T14:55:38,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-10T14:55:38,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:55:38,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57] hfiles 2024-12-10T14:55:38,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57 for snapshot=snaptb-testExportWithResetTtl 2024-12-10T14:55:38,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741918_1094 (size=100) 2024-12-10T14:55:38,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741918_1094 (size=100) 2024-12-10T14:55:38,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741918_1094 (size=100) 2024-12-10T14:55:38,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 
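[editor's note] With both SnapshotRegionProcedures finished, the master consolidates and verifies the manifest, and the test then exports the snapshot to HDFS (see snapshot.TestExportSnapshot(468) and ExportSnapshot(1082) a few entries further down). A hedged sketch of driving that export programmatically is shown here; it assumes ExportSnapshot can be run through ToolRunner just as it is from the command line, the class name and destination path are placeholders (the test writes to hdfs://localhost:36449/.../export-test/export-1733842539017), and the "reset TTL" behaviour this particular test exercises is controlled by an additional ExportSnapshot option not shown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnaptb {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the completed snapshot, its manifest and referenced hfiles,
        // to another HDFS root; the destination URI below is a placeholder.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode:8020/exported-snapshots"
        });
        System.exit(rc);
      }
    }

The subsequent MapReduce-related entries (TableMapReduceUtil resolving jars, the MiniMRCluster container cleanup warnings, and the long run of addStoredBlock lines) are the export job being staged and its dependency jars and snapshot files being written into HDFS.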
2024-12-10T14:55:38,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-10T14:55:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-10T14:55:38,676 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,676 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:38,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-10T14:55:38,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 6d19b980654796af47ae33f9a1b762f2 in 236 msec 2024-12-10T14:55:38,679 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:55:38,680 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:55:38,681 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:55:38,681 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-10T14:55:38,682 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-10T14:55:38,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741919_1095 (size=600) 2024-12-10T14:55:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741919_1095 (size=600) 2024-12-10T14:55:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741919_1095 (size=600) 2024-12-10T14:55:38,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-10T14:55:38,707 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 
execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:55:38,714 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:55:38,715 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-10T14:55:38,717 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:55:38,717 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-10T14:55:38,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 318 msec 2024-12-10T14:55:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-10T14:55:39,006 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-10T14:55:39,017 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017 2024-12-10T14:55:39,017 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:39,047 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:39,047 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-10T14:55:39,050 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration 
status and integrity. 2024-12-10T14:55:39,055 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-10T14:55:39,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741920_1096 (size=143) 2024-12-10T14:55:39,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741920_1096 (size=143) 2024-12-10T14:55:39,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741920_1096 (size=143) 2024-12-10T14:55:39,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741921_1097 (size=600) 2024-12-10T14:55:39,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741921_1097 (size=600) 2024-12-10T14:55:39,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741921_1097 (size=600) 2024-12-10T14:55:39,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741922_1098 (size=141) 2024-12-10T14:55:39,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741922_1098 (size=141) 2024-12-10T14:55:39,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741922_1098 (size=141) 2024-12-10T14:55:39,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:39,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:39,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:39,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,063 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0001_000001 (auth:SIMPLE) from 127.0.0.1:53510 2024-12-10T14:55:40,075 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000001/launch_container.sh] 2024-12-10T14:55:40,075 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000001/container_tokens] 2024-12-10T14:55:40,076 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0001/container_1733842510486_0001_01_000001/sysfs] 2024-12-10T14:55:40,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-1380599137083201307.jar 2024-12-10T14:55:40,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-12132014870020367995.jar 2024-12-10T14:55:40,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:55:40,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:55:40,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:55:40,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:55:40,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:55:40,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:55:40,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:55:40,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:55:40,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:55:40,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:55:40,750 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:55:40,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:55:40,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:55:40,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:40,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:40,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:40,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:40,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:55:40,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:40,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:55:40,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741923_1099 (size=127628) 2024-12-10T14:55:40,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741923_1099 (size=127628) 2024-12-10T14:55:40,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741923_1099 
(size=127628) 2024-12-10T14:55:40,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741924_1100 (size=2172101) 2024-12-10T14:55:40,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741924_1100 (size=2172101) 2024-12-10T14:55:40,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741924_1100 (size=2172101) 2024-12-10T14:55:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741925_1101 (size=213228) 2024-12-10T14:55:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741925_1101 (size=213228) 2024-12-10T14:55:40,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741925_1101 (size=213228) 2024-12-10T14:55:40,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741926_1102 (size=1877034) 2024-12-10T14:55:40,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741926_1102 (size=1877034) 2024-12-10T14:55:40,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741926_1102 (size=1877034) 2024-12-10T14:55:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741927_1103 (size=533455) 2024-12-10T14:55:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741927_1103 (size=533455) 2024-12-10T14:55:41,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741927_1103 (size=533455) 2024-12-10T14:55:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741928_1104 (size=7280644) 2024-12-10T14:55:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741928_1104 (size=7280644) 2024-12-10T14:55:41,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741928_1104 (size=7280644) 2024-12-10T14:55:41,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741929_1105 (size=4188619) 2024-12-10T14:55:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741929_1105 (size=4188619) 2024-12-10T14:55:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741929_1105 (size=4188619) 2024-12-10T14:55:41,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741930_1106 (size=20406) 2024-12-10T14:55:41,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to 
blk_1073741930_1106 (size=20406) 2024-12-10T14:55:41,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741930_1106 (size=20406) 2024-12-10T14:55:41,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741931_1107 (size=75495) 2024-12-10T14:55:41,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741931_1107 (size=75495) 2024-12-10T14:55:41,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741931_1107 (size=75495) 2024-12-10T14:55:41,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741932_1108 (size=45609) 2024-12-10T14:55:41,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741932_1108 (size=45609) 2024-12-10T14:55:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741932_1108 (size=45609) 2024-12-10T14:55:41,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741933_1109 (size=110084) 2024-12-10T14:55:41,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741933_1109 (size=110084) 2024-12-10T14:55:41,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741933_1109 (size=110084) 2024-12-10T14:55:41,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741934_1110 (size=1323991) 2024-12-10T14:55:41,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741934_1110 (size=1323991) 2024-12-10T14:55:41,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741934_1110 (size=1323991) 2024-12-10T14:55:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741935_1111 (size=23076) 2024-12-10T14:55:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741935_1111 (size=23076) 2024-12-10T14:55:41,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741935_1111 (size=23076) 2024-12-10T14:55:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741936_1112 (size=126803) 2024-12-10T14:55:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741936_1112 (size=126803) 2024-12-10T14:55:41,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741936_1112 (size=126803) 2024-12-10T14:55:41,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to 
blk_1073741937_1113 (size=322274) 2024-12-10T14:55:41,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741937_1113 (size=322274) 2024-12-10T14:55:41,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741937_1113 (size=322274) 2024-12-10T14:55:41,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741938_1114 (size=1832290) 2024-12-10T14:55:41,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741938_1114 (size=1832290) 2024-12-10T14:55:41,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741938_1114 (size=1832290) 2024-12-10T14:55:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741939_1115 (size=451756) 2024-12-10T14:55:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741939_1115 (size=451756) 2024-12-10T14:55:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741939_1115 (size=451756) 2024-12-10T14:55:41,406 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:55:41,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741940_1116 (size=30081) 2024-12-10T14:55:41,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741940_1116 (size=30081) 2024-12-10T14:55:41,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741940_1116 (size=30081) 2024-12-10T14:55:42,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741941_1117 (size=53616) 2024-12-10T14:55:42,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741941_1117 (size=53616) 2024-12-10T14:55:42,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741941_1117 (size=53616) 2024-12-10T14:55:42,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741942_1118 (size=29229) 2024-12-10T14:55:42,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741942_1118 (size=29229) 2024-12-10T14:55:42,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741942_1118 (size=29229) 2024-12-10T14:55:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741943_1119 (size=169089) 2024-12-10T14:55:42,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added 
to blk_1073741943_1119 (size=169089) 2024-12-10T14:55:42,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741943_1119 (size=169089) 2024-12-10T14:55:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741944_1120 (size=5175431) 2024-12-10T14:55:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741944_1120 (size=5175431) 2024-12-10T14:55:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741944_1120 (size=5175431) 2024-12-10T14:55:42,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-10T14:55:42,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:42,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-10T14:55:42,594 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-10T14:55:42,595 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-10T14:55:42,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741945_1121 (size=136454) 2024-12-10T14:55:42,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741945_1121 (size=136454) 2024-12-10T14:55:42,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741945_1121 (size=136454) 2024-12-10T14:55:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741946_1122 (size=6350861) 2024-12-10T14:55:43,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741946_1122 (size=6350861) 2024-12-10T14:55:43,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741946_1122 (size=6350861) 2024-12-10T14:55:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741947_1123 (size=907858) 2024-12-10T14:55:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741947_1123 (size=907858) 2024-12-10T14:55:43,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741947_1123 (size=907858) 2024-12-10T14:55:43,171 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741948_1124 (size=3317408) 2024-12-10T14:55:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741948_1124 (size=3317408) 2024-12-10T14:55:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741948_1124 (size=3317408) 2024-12-10T14:55:43,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741949_1125 (size=503880) 2024-12-10T14:55:43,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741949_1125 (size=503880) 2024-12-10T14:55:43,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741949_1125 (size=503880) 2024-12-10T14:55:43,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741950_1126 (size=4695811) 2024-12-10T14:55:43,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741950_1126 (size=4695811) 2024-12-10T14:55:43,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741950_1126 (size=4695811) 2024-12-10T14:55:43,219 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-10T14:55:43,222 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-10T14:55:43,225 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:55:43,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741951_1127 (size=324) 2024-12-10T14:55:43,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741951_1127 (size=324) 2024-12-10T14:55:43,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741951_1127 (size=324) 2024-12-10T14:55:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741952_1128 (size=15) 2024-12-10T14:55:43,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741952_1128 (size=15) 2024-12-10T14:55:43,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741952_1128 (size=15) 2024-12-10T14:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741953_1129 (size=304879) 2024-12-10T14:55:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741953_1129 (size=304879) 2024-12-10T14:55:43,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741953_1129 (size=304879) 
2024-12-10T14:55:43,310 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:55:43,310 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:55:43,722 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0002_000001 (auth:SIMPLE) from 127.0.0.1:58550 2024-12-10T14:55:48,098 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:55:48,335 INFO [master/db1d50717577:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-10T14:55:48,335 INFO [master/db1d50717577:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-10T14:55:50,650 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0002_000001 (auth:SIMPLE) from 127.0.0.1:56774 2024-12-10T14:55:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741954_1130 (size=350553) 2024-12-10T14:55:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741954_1130 (size=350553) 2024-12-10T14:55:50,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741954_1130 (size=350553) 2024-12-10T14:55:52,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0002_000001 (auth:SIMPLE) from 127.0.0.1:60548 2024-12-10T14:55:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741955_1131 (size=8188) 2024-12-10T14:55:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741955_1131 (size=8188) 2024-12-10T14:55:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741955_1131 (size=8188) 2024-12-10T14:55:56,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741956_1132 (size=5424) 2024-12-10T14:55:56,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741956_1132 (size=5424) 2024-12-10T14:55:56,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741956_1132 (size=5424) 2024-12-10T14:55:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741957_1133 (size=17398) 2024-12-10T14:55:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741957_1133 
(size=17398) 2024-12-10T14:55:56,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741957_1133 (size=17398) 2024-12-10T14:55:56,703 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000002/launch_container.sh] 2024-12-10T14:55:56,703 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000002/container_tokens] 2024-12-10T14:55:56,703 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000002/sysfs] 2024-12-10T14:55:56,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741958_1134 (size=461) 2024-12-10T14:55:56,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741958_1134 (size=461) 2024-12-10T14:55:56,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741958_1134 (size=461) 2024-12-10T14:55:56,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741959_1135 (size=17398) 2024-12-10T14:55:56,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741959_1135 (size=17398) 2024-12-10T14:55:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741959_1135 (size=17398) 2024-12-10T14:55:56,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741960_1136 (size=350553) 2024-12-10T14:55:56,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741960_1136 (size=350553) 2024-12-10T14:55:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741960_1136 (size=350553) 2024-12-10T14:55:58,478 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:55:58,480 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
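For reference, the export that the preceding entries finalize and verify ("Finalize the Snapshot Export" / "Verify the exported snapshot's expiration status and integrity") can be driven with the stock ExportSnapshot tool. The following is a minimal sketch, not taken from the test itself: the snapshot name and the NameNode address come from the log, while the target directory, the mapper count, and the use of ToolRunner are assumptions; the test additionally resets the snapshot TTL during export (per its name), which this sketch does not attempt to reproduce.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Documented CLI equivalent of the call below:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb-testExportWithResetTtl \
    //       -copy-to hdfs://localhost:36449/user/jenkins/export-target -mappers 1
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",                      // snapshot name from the log
        "-copy-to", "hdfs://localhost:36449/user/jenkins/export-target",   // hypothetical target directory
        "-mappers", "1"                                                    // assumed; the log shows a single split
    });
    System.exit(rc);
  }
}

After the copy, the entries that follow show the test listing .snapshotinfo and data.manifest under both the source .hbase-snapshot directory and the export target before declaring the export verified.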
2024-12-10T14:55:58,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-10T14:55:58,486 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:55:58,487 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:55:58,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-10T14:55:58,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-10T14:55:58,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-10T14:55:58,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-10T14:55:58,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-10T14:55:58,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842539017/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-10T14:55:58,495 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-10T14:55:58,495 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-10T14:55:58,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:55:58,499 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842558498"}]},"ts":"1733842558498"} 2024-12-10T14:55:58,500 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-10T14:55:58,502 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 
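The disable and delete steps recorded around this point (DisableTableProcedure pid=49/56, DeleteTableProcedure pid=55/62) are issued from the client through the HBase Admin API. The sketch below is illustrative only, assuming a plain client Connection against the test cluster; the table names are the ones in the log, everything else is an assumption.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportTestTables {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Table names taken from the log; both are dropped the same way.
      for (String name : new String[] {"testExportWithResetTtl", "testtb-testExportWithResetTtl"}) {
        TableName table = TableName.valueOf(name);
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // server side: DisableTableProcedure (pid=49 / pid=56 in the log)
        }
        admin.deleteTable(table);      // server side: DeleteTableProcedure (pid=55 / pid=62 in the log)
      }
    }
  }
}

The remaining entries in this section are the server-side effects of those two calls: regions are unassigned and closed, HFileArchiver moves store files and recovered.edits under archive/, the /hbase/acl znodes are cleaned up, and the table state and region rows are removed from hbase:meta.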
2024-12-10T14:55:58,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-10T14:55:58,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, UNASSIGN}] 2024-12-10T14:55:58,505 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, UNASSIGN 2024-12-10T14:55:58,505 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, UNASSIGN 2024-12-10T14:55:58,506 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=6d19b980654796af47ae33f9a1b762f2, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:58,506 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=8f098689ed91c97341e38a2890fcaa08, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:58,508 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:58,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 6d19b980654796af47ae33f9a1b762f2, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:58,509 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:58,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 8f098689ed91c97341e38a2890fcaa08, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:58,594 ERROR [ProcedureDispatcherTimeoutThread {}] procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. 
Queue: [containedObject=db1d50717577,37707,1733842502956, timeout=1733842558659, delay=65, operations=[pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 6d19b980654796af47ae33f9a1b762f2, server=db1d50717577,37707,1733842502956], containedObject=db1d50717577,45255,1733842503015, timeout=1733842558660, delay=66, operations=[pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 8f098689ed91c97341e38a2890fcaa08, server=db1d50717577,45255,1733842503015]] 2024-12-10T14:55:58,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:55:58,660 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:58,660 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:58,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 6d19b980654796af47ae33f9a1b762f2, disabling compactions & flushes 2024-12-10T14:55:58,661 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:58,661 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. after waiting 0 ms 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 8f098689ed91c97341e38a2890fcaa08, disabling compactions & flushes 2024-12-10T14:55:58,661 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 
2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. after waiting 0 ms 2024-12-10T14:55:58,661 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:58,667 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:55:58,667 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:55:58,668 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:58,668 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:58,668 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08. 2024-12-10T14:55:58,668 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2. 
2024-12-10T14:55:58,668 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 8f098689ed91c97341e38a2890fcaa08: 2024-12-10T14:55:58,668 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 6d19b980654796af47ae33f9a1b762f2: 2024-12-10T14:55:58,669 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:58,670 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=8f098689ed91c97341e38a2890fcaa08, regionState=CLOSED 2024-12-10T14:55:58,670 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:58,671 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=6d19b980654796af47ae33f9a1b762f2, regionState=CLOSED 2024-12-10T14:55:58,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-10T14:55:58,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-10T14:55:58,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 8f098689ed91c97341e38a2890fcaa08, server=db1d50717577,45255,1733842503015 in 163 msec 2024-12-10T14:55:58,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 6d19b980654796af47ae33f9a1b762f2, server=db1d50717577,37707,1733842502956 in 164 msec 2024-12-10T14:55:58,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=8f098689ed91c97341e38a2890fcaa08, UNASSIGN in 170 msec 2024-12-10T14:55:58,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-10T14:55:58,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=6d19b980654796af47ae33f9a1b762f2, UNASSIGN in 170 msec 2024-12-10T14:55:58,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-10T14:55:58,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 174 msec 2024-12-10T14:55:58,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842558680"}]},"ts":"1733842558680"} 2024-12-10T14:55:58,682 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-10T14:55:58,684 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-10T14:55:58,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 190 msec 2024-12-10T14:55:58,800 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:55:58,801 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-10T14:55:58,801 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-10T14:55:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,803 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-10T14:55:58,804 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-10T14:55:58,808 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:58,808 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:58,810 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/recovered.edits] 2024-12-10T14:55:58,810 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/recovered.edits] 2024-12-10T14:55:58,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,810 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-10T14:55:58,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-10T14:55:58,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-10T14:55:58,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-10T14:55:58,820 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/cf/cf2defb6361041379e8848bf00ad8f57 2024-12-10T14:55:58,820 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/cf/ae616ff09e0b4c17940e36de6d2b36c3 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:58,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:58,822 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:55:58,822 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:58,822 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:58,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:58,825 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/recovered.edits/8.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2/recovered.edits/8.seqid 2024-12-10T14:55:58,825 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/recovered.edits/8.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08/recovered.edits/8.seqid 2024-12-10T14:55:58,826 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/6d19b980654796af47ae33f9a1b762f2 2024-12-10T14:55:58,826 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportWithResetTtl/8f098689ed91c97341e38a2890fcaa08 2024-12-10T14:55:58,826 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived 
testExportWithResetTtl regions 2024-12-10T14:55:58,828 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,831 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-10T14:55:58,833 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-10T14:55:58,835 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,835 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 2024-12-10T14:55:58,835 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842558835"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:58,835 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842558835"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:58,838 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:55:58,838 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8f098689ed91c97341e38a2890fcaa08, NAME => 'testExportWithResetTtl,,1733842537734.8f098689ed91c97341e38a2890fcaa08.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6d19b980654796af47ae33f9a1b762f2, NAME => 'testExportWithResetTtl,1,1733842537734.6d19b980654796af47ae33f9a1b762f2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:55:58,838 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 
2024-12-10T14:55:58,838 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842558838"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:58,840 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-10T14:55:58,842 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-10T14:55:58,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 40 msec 2024-12-10T14:55:58,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:55:58,924 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-10T14:55:58,924 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-10T14:55:58,925 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-10T14:55:58,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:58,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-10T14:55:58,928 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842558928"}]},"ts":"1733842558928"} 2024-12-10T14:55:58,930 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-10T14:55:58,932 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-10T14:55:58,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-10T14:55:58,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, UNASSIGN}] 2024-12-10T14:55:58,935 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, UNASSIGN 2024-12-10T14:55:58,935 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, UNASSIGN 2024-12-10T14:55:58,936 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4a1aefbc6d60395d01fda31124f1765c, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:58,936 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=136851938ae5fccaa2ca171abc942d0e, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:55:58,938 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:58,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE; CloseRegionProcedure 4a1aefbc6d60395d01fda31124f1765c, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:55:58,939 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:55:58,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE; CloseRegionProcedure 136851938ae5fccaa2ca171abc942d0e, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:55:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-10T14:55:59,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:55:59,091 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:59,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing 4a1aefbc6d60395d01fda31124f1765c, disabling compactions & flushes 2024-12-10T14:55:59,091 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. after waiting 0 ms 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
2024-12-10T14:55:59,091 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:59,091 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:55:59,092 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 136851938ae5fccaa2ca171abc942d0e, disabling compactions & flushes 2024-12-10T14:55:59,092 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:59,092 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:59,092 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. after waiting 0 ms 2024-12-10T14:55:59,092 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:59,097 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:55:59,098 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:59,098 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e. 2024-12-10T14:55:59,098 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 136851938ae5fccaa2ca171abc942d0e: 2024-12-10T14:55:59,098 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:55:59,098 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:55:59,098 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c. 
2024-12-10T14:55:59,098 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 4a1aefbc6d60395d01fda31124f1765c: 2024-12-10T14:55:59,099 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:59,100 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=136851938ae5fccaa2ca171abc942d0e, regionState=CLOSED 2024-12-10T14:55:59,100 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:59,101 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4a1aefbc6d60395d01fda31124f1765c, regionState=CLOSED 2024-12-10T14:55:59,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=59 2024-12-10T14:55:59,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=59, state=SUCCESS; CloseRegionProcedure 136851938ae5fccaa2ca171abc942d0e, server=db1d50717577,45255,1733842503015 in 164 msec 2024-12-10T14:55:59,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=58 2024-12-10T14:55:59,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=136851938ae5fccaa2ca171abc942d0e, UNASSIGN in 174 msec 2024-12-10T14:55:59,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=58, state=SUCCESS; CloseRegionProcedure 4a1aefbc6d60395d01fda31124f1765c, server=db1d50717577,41775,1733842502887 in 166 msec 2024-12-10T14:55:59,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-10T14:55:59,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4a1aefbc6d60395d01fda31124f1765c, UNASSIGN in 175 msec 2024-12-10T14:55:59,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-10T14:55:59,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 178 msec 2024-12-10T14:55:59,114 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842559114"}]},"ts":"1733842559114"} 2024-12-10T14:55:59,116 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-10T14:55:59,118 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-10T14:55:59,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 193 msec 2024-12-10T14:55:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-10T14:55:59,231 INFO [Time-limited test 
{}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-10T14:55:59,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-10T14:55:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,233 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-10T14:55:59,234 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-10T14:55:59,238 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:59,238 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,240 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/recovered.edits] 2024-12-10T14:55:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,240 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf, FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/recovered.edits] 2024-12-10T14:55:59,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-10T14:55:59,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-10T14:55:59,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-10T14:55:59,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,243 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-10T14:55:59,243 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-10T14:55:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-10T14:55:59,247 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea 2024-12-10T14:55:59,247 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/cf/36febbc65d504d67ad3cf5596d2e0aa6 2024-12-10T14:55:59,251 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/recovered.edits/9.seqid 2024-12-10T14:55:59,252 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c/recovered.edits/9.seqid 2024-12-10T14:55:59,252 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e 2024-12-10T14:55:59,252 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithResetTtl/4a1aefbc6d60395d01fda31124f1765c 2024-12-10T14:55:59,252 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-10T14:55:59,255 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,258 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-10T14:55:59,261 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-10T14:55:59,262 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,262 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 
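The HFileArchiver entries above show that deleting the table moves store files and recovered.edits rather than removing them: each path under data/ is re-rooted under archive/data/ beneath the same HBase root directory. The sketch below only derives that destination path from the layout visible in the log; it is an illustration of the naming scheme, not a call into HBase's internal HFileArchiver.

// Illustration of the archive layout seen in the HFileArchiver entries above:
//   <root>/data/<ns>/<table>/<region>/<cf>/<file> -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>
// Paths are copied from the log; nothing here touches the cluster.
import org.apache.hadoop.fs.Path;

public class ArchivePathLayout {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    String relative = file.substring(root.length() + 1);       // e.g. "data/default/<table>/<region>/cf/<hfile>"
    return new Path(new Path(rootDir, "archive"), relative);   // re-root the same relative path under archive/
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef");
    Path hfile = new Path(root,
        "data/default/testtb-testExportWithResetTtl/136851938ae5fccaa2ca171abc942d0e/cf/8530d71659db419cb3cf1b4e0d51ebea");
    System.out.println(toArchivePath(root, hfile));  // matches the "Archived from ... to ..." destination above
  }
}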
2024-12-10T14:55:59,263 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842559262"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:59,263 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842559262"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:59,265 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:55:59,265 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4a1aefbc6d60395d01fda31124f1765c, NAME => 'testtb-testExportWithResetTtl,,1733842536384.4a1aefbc6d60395d01fda31124f1765c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 136851938ae5fccaa2ca171abc942d0e, NAME => 'testtb-testExportWithResetTtl,1,1733842536384.136851938ae5fccaa2ca171abc942d0e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:55:59,265 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-10T14:55:59,265 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842559265"}]},"ts":"9223372036854775807"} 2024-12-10T14:55:59,267 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-10T14:55:59,269 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-10T14:55:59,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 38 msec 2024-12-10T14:55:59,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-10T14:55:59,346 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-10T14:55:59,356 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-10T14:55:59,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-10T14:55:59,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-10T14:55:59,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-10T14:55:59,364 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-10T14:55:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-10T14:55:59,397 INFO 
[Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=803 (was 778) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:35987 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41679 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:36750 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_668118885_1 at /127.0.0.1:39068 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2031 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:39096 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:51120 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35987 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25619) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_668118885_1 at /127.0.0.1:36740 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=481 (was 405) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2143 (was 2501) 2024-12-10T14:55:59,397 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-10T14:55:59,419 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=803, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=481, ProcessCount=17, AvailableMemoryMB=2141 2024-12-10T14:55:59,419 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-10T14:55:59,421 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:55:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:55:59,424 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:55:59,424 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:59,424 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-10T14:55:59,425 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; 
CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:55:59,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:55:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741961_1137 (size=407) 2024-12-10T14:55:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741961_1137 (size=407) 2024-12-10T14:55:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741961_1137 (size=407) 2024-12-10T14:55:59,436 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => fa5bef8ce85335a6dd0147f15cb9e220, NAME => 'testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:59,436 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d8b1ebedae14d6d5b339ab8f1bb779d7, NAME => 'testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:55:59,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741962_1138 (size=68) 2024-12-10T14:55:59,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741962_1138 (size=68) 2024-12-10T14:55:59,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741963_1139 (size=68) 2024-12-10T14:55:59,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741962_1138 (size=68) 2024-12-10T14:55:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741963_1139 (size=68) 2024-12-10T14:55:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to 
blk_1073741963_1139 (size=68) 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing fa5bef8ce85335a6dd0147f15cb9e220, disabling compactions & flushes 2024-12-10T14:55:59,452 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. after waiting 0 ms 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing d8b1ebedae14d6d5b339ab8f1bb779d7, disabling compactions & flushes 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,452 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:55:59,452 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. after waiting 0 ms 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for fa5bef8ce85335a6dd0147f15cb9e220: 2024-12-10T14:55:59,452 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 
2024-12-10T14:55:59,453 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:55:59,453 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for d8b1ebedae14d6d5b339ab8f1bb779d7: 2024-12-10T14:55:59,454 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:55:59,454 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733842559454"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842559454"}]},"ts":"1733842559454"} 2024-12-10T14:55:59,454 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733842559454"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842559454"}]},"ts":"1733842559454"} 2024-12-10T14:55:59,456 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:55:59,457 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:55:59,457 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842559457"}]},"ts":"1733842559457"} 2024-12-10T14:55:59,459 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-10T14:55:59,464 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:55:59,465 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:55:59,465 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:55:59,465 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:55:59,465 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:55:59,465 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:55:59,465 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:55:59,465 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:55:59,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, ASSIGN}] 
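The CreateTableProcedure traced above (pid=63) was started by a client create request with a single 'cf' family and one split key, which is why two regions ('' to '1' and '1' to '') are initialized and then handed to the ASSIGN subprocedures. A minimal client-side equivalent is sketched below; it assumes the usual hbase-site.xml configuration, takes the table name and split key from the log, and sets only the one attribute that the echoed descriptor states explicitly (VERSIONS => '1').

// Sketch of the client call behind the CreateTableProcedure (pid=63) above; not the test's code.
// Table name and the split at row key "1" are taken from the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportFileSystemStateTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                      // VERSIONS => '1' in the descriptor above
              .build())
          .build();
      byte[][] splitKeys = { Bytes.toBytes("1") };    // yields the regions ''->'1' and '1'->''
      admin.createTable(desc, splitKeys);             // blocks until the regions are assigned
    }
  }
}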
2024-12-10T14:55:59,467 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, ASSIGN 2024-12-10T14:55:59,467 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, ASSIGN 2024-12-10T14:55:59,467 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:55:59,468 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:55:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:55:59,618 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:55:59,618 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=d8b1ebedae14d6d5b339ab8f1bb779d7, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:59,618 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=fa5bef8ce85335a6dd0147f15cb9e220, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:59,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE; OpenRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:55:59,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE; OpenRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:55:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:55:59,772 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:55:59,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:55:59,776 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 
2024-12-10T14:55:59,777 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => d8b1ebedae14d6d5b339ab8f1bb779d7, NAME => 'testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:55:59,777 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,777 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => fa5bef8ce85335a6dd0147f15cb9e220, NAME => 'testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:55:59,777 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. service=AccessControlService 2024-12-10T14:55:59,777 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. service=AccessControlService 2024-12-10T14:55:59,777 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:55:59,777 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
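The coprocessor registration above and the store configuration in the entries that follow are driven by the table descriptor stored during the create step. Once the table exists, a client can read that descriptor back and confirm the 'cf' attributes echoed in the create request (VERSIONS, TTL, BLOCKSIZE); the small sketch below is illustrative only and assumes the same cluster configuration as the earlier sketches.

// Sketch: read back the descriptor whose attributes are echoed in the create request above
// and applied when the 'cf' store is opened below.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class DescribeExportFileSystemStateTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      ColumnFamilyDescriptor cf = admin
          .getDescriptor(TableName.valueOf("testtb-testExportFileSystemState"))
          .getColumnFamily(Bytes.toBytes("cf"));
      System.out.println("versions=" + cf.getMaxVersions()     // 1
          + " ttl=" + cf.getTimeToLive()                       // FOREVER (Integer.MAX_VALUE)
          + " blocksize=" + cf.getBlocksize());                // 65536
    }
  }
}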
2024-12-10T14:55:59,777 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,779 INFO [StoreOpener-d8b1ebedae14d6d5b339ab8f1bb779d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,779 INFO [StoreOpener-fa5bef8ce85335a6dd0147f15cb9e220-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,781 INFO [StoreOpener-fa5bef8ce85335a6dd0147f15cb9e220-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
fa5bef8ce85335a6dd0147f15cb9e220 columnFamilyName cf 2024-12-10T14:55:59,781 INFO [StoreOpener-d8b1ebedae14d6d5b339ab8f1bb779d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8b1ebedae14d6d5b339ab8f1bb779d7 columnFamilyName cf 2024-12-10T14:55:59,781 DEBUG [StoreOpener-d8b1ebedae14d6d5b339ab8f1bb779d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:59,781 DEBUG [StoreOpener-fa5bef8ce85335a6dd0147f15cb9e220-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:55:59,782 INFO [StoreOpener-fa5bef8ce85335a6dd0147f15cb9e220-1 {}] regionserver.HStore(327): Store=fa5bef8ce85335a6dd0147f15cb9e220/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:59,782 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,783 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,783 INFO [StoreOpener-d8b1ebedae14d6d5b339ab8f1bb779d7-1 {}] regionserver.HStore(327): Store=d8b1ebedae14d6d5b339ab8f1bb779d7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:55:59,784 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,784 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,785 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:55:59,787 DEBUG 
[RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:55:59,787 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:59,788 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened fa5bef8ce85335a6dd0147f15cb9e220; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72788854, jitterRate=0.08463844656944275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:59,789 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:55:59,789 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for fa5bef8ce85335a6dd0147f15cb9e220: 2024-12-10T14:55:59,789 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened d8b1ebedae14d6d5b339ab8f1bb779d7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69346475, jitterRate=0.03334300220012665}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:55:59,790 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for d8b1ebedae14d6d5b339ab8f1bb779d7: 2024-12-10T14:55:59,790 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220., pid=67, masterSystemTime=1733842559773 2024-12-10T14:55:59,790 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7., pid=66, masterSystemTime=1733842559772 2024-12-10T14:55:59,791 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:55:59,791 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 
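The two "Opened ...; next sequenceid=2" entries above print a per-region desiredMaxFileSize that already includes the random split jitter drawn at region open. Assuming the test configures a 64 MB (67108864-byte) region max file size, an assumption not stated in the log, the printed sizes can be reproduced from the printed jitterRate values:

    public class SplitJitterCheck {
      public static void main(String[] args) {
        long assumedBase = 67108864L; // assumption: a 64 MB max file size configured for the test
        double[] jitterRates = { 0.08463844656944275, 0.03334300220012665 }; // values printed above
        for (double jitterRate : jitterRates) {
          // ConstantSizeRegionSplitPolicy adds (long)(base * jitterRate) to the configured size.
          long desiredMaxFileSize = assumedBase + (long) (assumedBase * jitterRate);
          System.out.println(desiredMaxFileSize); // prints 72788854 and 69346475, matching the log
        }
      }
    }
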
2024-12-10T14:55:59,792 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=fa5bef8ce85335a6dd0147f15cb9e220, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:55:59,792 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:55:59,792 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:55:59,793 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=d8b1ebedae14d6d5b339ab8f1bb779d7, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:55:59,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=65 2024-12-10T14:55:59,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=65, state=SUCCESS; OpenRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220, server=db1d50717577,41775,1733842502887 in 173 msec 2024-12-10T14:55:59,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=64 2024-12-10T14:55:59,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, ASSIGN in 330 msec 2024-12-10T14:55:59,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=64, state=SUCCESS; OpenRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7, server=db1d50717577,37707,1733842502956 in 175 msec 2024-12-10T14:55:59,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-10T14:55:59,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, ASSIGN in 332 msec 2024-12-10T14:55:59,800 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:55:59,800 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842559800"}]},"ts":"1733842559800"} 2024-12-10T14:55:59,801 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-10T14:55:59,804 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:55:59,804 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-10T14:55:59,806 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: 
entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:55:59,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:55:59,810 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:59,810 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:59,810 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:59,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:55:59,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 389 msec 2024-12-10T14:56:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:56:00,030 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-10T14:56:00,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-10T14:56:00,031 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:00,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 
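The "Waiting until all regions of table testtb-testExportFileSystemState get assigned" and "All regions ... assigned" lines come from the test harness blocking until both regions of the new table are online. A hedged sketch of that pattern with HBaseTestingUtility; the split key '1' matches the STARTKEY/ENDKEY boundaries logged above, while the cluster size and the specific createTable overload are assumptions:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndWaitSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(3); // the log shows three regionservers (ports 41775, 37707, 45255)
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        // One family 'cf', pre-split at '1': two regions, ('' .. '1') and ('1' .. ''), as opened above.
        util.createTable(tn, new byte[][] { Bytes.toBytes("cf") }, new byte[][] { Bytes.toBytes("1") });
        util.waitUntilAllRegionsAssigned(tn); // the wait logged by HBaseTestingUtility(3531)/(3605)
        util.shutdownMiniCluster();
      }
    }
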
2024-12-10T14:56:00,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:00,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-10T14:56:00,039 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:56:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842560039 (current time:1733842560039). 2024-12-10T14:56:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-10T14:56:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e924514 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d0c12dd 2024-12-10T14:56:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ed4562, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:00,047 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58152, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e924514 to 127.0.0.1:57176 2024-12-10T14:56:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x388b4996 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c4c8444 2024-12-10T14:56:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166018fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:00,056 DEBUG [hconnection-0x3a6cdf9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-10T14:56:00,057 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:00,059 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x388b4996 to 127.0.0.1:57176 2024-12-10T14:56:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:56:00,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:56:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:56:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-10T14:56:00,063 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:56:00,064 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:00,066 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:00,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741964_1140 (size=170) 2024-12-10T14:56:00,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741964_1140 (size=170) 2024-12-10T14:56:00,115 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741964_1140 (size=170) 2024-12-10T14:56:00,116 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:00,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220}] 2024-12-10T14:56:00,117 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,118 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:56:00,268 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:00,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:00,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-10T14:56:00,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-10T14:56:00,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for d8b1ebedae14d6d5b339ab8f1bb779d7: 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. for emptySnaptb0-testExportFileSystemState completed. 
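Procedure pid=68 above is the server side of a single synchronous snapshot request; while it runs, the client's HBaseAdmin keeps polling the master, which is what produces the repeated "Checking to see if procedure is done pid=68" lines. A hedged sketch of the client-side call; the connection setup is assumed, the snapshot and table names are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes client config pointing at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this is a FLUSH-type snapshot, matching "type=FLUSH ttl=0" above;
          // the call blocks until the SnapshotProcedure finishes on the master.
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }
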
2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for fa5bef8ce85335a6dd0147f15cb9e220: 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. for emptySnaptb0-testExportFileSystemState completed. 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:00,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:00,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741965_1141 (size=71) 2024-12-10T14:56:00,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741966_1142 (size=71) 2024-12-10T14:56:00,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741965_1141 (size=71) 2024-12-10T14:56:00,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741966_1142 (size=71) 2024-12-10T14:56:00,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741965_1141 (size=71) 2024-12-10T14:56:00,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 
2024-12-10T14:56:00,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741966_1142 (size=71) 2024-12-10T14:56:00,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-10T14:56:00,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:00,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-10T14:56:00,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-10T14:56:00,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-10T14:56:00,290 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,290 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 in 175 msec 2024-12-10T14:56:00,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-10T14:56:00,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 in 175 msec 2024-12-10T14:56:00,295 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:00,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:00,297 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:00,297 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:00,298 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741967_1143 (size=552) 2024-12-10T14:56:00,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741967_1143 (size=552) 2024-12-10T14:56:00,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741967_1143 (size=552) 2024-12-10T14:56:00,314 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:00,321 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:00,321 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:00,323 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:00,323 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-10T14:56:00,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 262 msec 2024-12-10T14:56:00,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:56:00,366 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-10T14:56:00,377 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to 
region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:00,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:00,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-10T14:56:00,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:00,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:00,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:56:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842560404 (current time:1733842560404). 2024-12-10T14:56:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-10T14:56:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f864f5e to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@185bc56e 2024-12-10T14:56:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15abbc6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:00,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:00,425 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f864f5e to 127.0.0.1:57176 2024-12-10T14:56:00,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:00,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76de42b9 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry 
interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@306899b9 2024-12-10T14:56:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78e80b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:00,457 DEBUG [hconnection-0x1b55c032-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:00,458 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:00,466 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76de42b9 to 127.0.0.1:57176 2024-12-10T14:56:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:56:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
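The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above are what HRegion emits when rows are loaded with puts that skip the WAL. A minimal sketch of such a write; only the table name, the family 'cf' and the qualifier 'q' come from the log, the row key is one that later shows up in the flush below, and the value is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("05baba3b5b1234049c5e508aaebbce93")); // a row key seen in the flush below
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")); // value is hypothetical
          put.setDurability(Durability.SKIP_WAL); // this is what triggers the "WAL disabled" warning in HRegion
          table.put(put);
        }
      }
    }
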
2024-12-10T14:56:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:56:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-10T14:56:00,472 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:56:00,473 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:00,475 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741968_1144 (size=165) 2024-12-10T14:56:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741968_1144 (size=165) 2024-12-10T14:56:00,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741968_1144 (size=165) 2024-12-10T14:56:00,492 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:00,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220}] 2024-12-10T14:56:00,493 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,493 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=71 2024-12-10T14:56:00,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:00,645 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:00,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-10T14:56:00,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:00,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-10T14:56:00,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing d8b1ebedae14d6d5b339ab8f1bb779d7 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-10T14:56:00,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:00,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing fa5bef8ce85335a6dd0147f15cb9e220 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-10T14:56:00,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/.tmp/cf/56959a52bcd34f9bbb820d21225ec0f5 is 71, key is 131a714e5ff13c174937e26a5c6762df/cf:q/1733842560380/Put/seqid=0 2024-12-10T14:56:00,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/.tmp/cf/a613c9d39b3b472c9a16f69a03f4aeae is 71, key is 05baba3b5b1234049c5e508aaebbce93/cf:q/1733842560377/Put/seqid=0 2024-12-10T14:56:00,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741970_1146 (size=8188) 2024-12-10T14:56:00,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741970_1146 (size=8188) 2024-12-10T14:56:00,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741970_1146 (size=8188) 2024-12-10T14:56:00,713 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/.tmp/cf/56959a52bcd34f9bbb820d21225ec0f5 2024-12-10T14:56:00,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/.tmp/cf/56959a52bcd34f9bbb820d21225ec0f5 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5 2024-12-10T14:56:00,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741969_1145 (size=5422) 2024-12-10T14:56:00,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741969_1145 (size=5422) 2024-12-10T14:56:00,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741969_1145 (size=5422) 2024-12-10T14:56:00,723 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/.tmp/cf/a613c9d39b3b472c9a16f69a03f4aeae 2024-12-10T14:56:00,728 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5, entries=45, sequenceid=6, filesize=8.0 K 2024-12-10T14:56:00,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/.tmp/cf/a613c9d39b3b472c9a16f69a03f4aeae as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae 2024-12-10T14:56:00,733 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for fa5bef8ce85335a6dd0147f15cb9e220 in 85ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:00,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-10T14:56:00,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for fa5bef8ce85335a6dd0147f15cb9e220: 2024-12-10T14:56:00,734 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. for snaptb0-testExportFileSystemState completed. 2024-12-10T14:56:00,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-10T14:56:00,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:00,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5] hfiles 2024-12-10T14:56:00,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5 for snapshot=snaptb0-testExportFileSystemState 2024-12-10T14:56:00,739 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae, entries=5, sequenceid=6, filesize=5.3 K 2024-12-10T14:56:00,741 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for d8b1ebedae14d6d5b339ab8f1bb779d7 in 94ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for d8b1ebedae14d6d5b339ab8f1bb779d7: 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. for snaptb0-testExportFileSystemState completed. 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae] hfiles 2024-12-10T14:56:00,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae for snapshot=snaptb0-testExportFileSystemState 2024-12-10T14:56:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741971_1147 (size=110) 2024-12-10T14:56:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741971_1147 (size=110) 2024-12-10T14:56:00,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741971_1147 (size=110) 2024-12-10T14:56:00,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 
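A quick arithmetic cross-check of the flush journals above: fa5bef8ce85335a6dd0147f15cb9e220 flushed 3003 bytes in 45 cells and d8b1ebedae14d6d5b339ab8f1bb779d7 flushed 333 bytes in 5 cells, about 67 bytes per cell in both regions, which is consistent with the 71-byte "biggest cell" reported by HFileWriterImpl:

    public class FlushSizeCheck {
      public static void main(String[] args) {
        // Values copied from the two flush journals above (dataSize in bytes, entries per flushed HFile).
        int[] cells = { 45, 5 };
        int[] dataSize = { 3003, 333 };
        for (int i = 0; i < cells.length; i++) {
          System.out.printf("avg cell size: %.1f bytes%n", dataSize[i] / (double) cells[i]);
          // prints ~66.7 and ~66.6 bytes per cell, just under the 71-byte biggest cell reported above
        }
      }
    }
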
2024-12-10T14:56:00,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-10T14:56:00,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-10T14:56:00,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,764 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:00,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220 in 273 msec 2024-12-10T14:56:00,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741972_1148 (size=110) 2024-12-10T14:56:00,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741972_1148 (size=110) 2024-12-10T14:56:00,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741972_1148 (size=110) 2024-12-10T14:56:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:56:00,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 
2024-12-10T14:56:00,777 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-10T14:56:00,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-10T14:56:00,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,778 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:00,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-10T14:56:00,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7 in 286 msec 2024-12-10T14:56:00,781 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:00,782 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:00,783 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:00,783 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-10T14:56:00,784 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-10T14:56:00,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741973_1149 (size=630) 2024-12-10T14:56:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741973_1149 (size=630) 2024-12-10T14:56:00,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741973_1149 (size=630) 2024-12-10T14:56:00,811 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:00,817 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:00,818 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-10T14:56:00,819 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:00,819 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-10T14:56:00,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 349 msec 2024-12-10T14:56:00,923 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
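The entries above show the master finishing SnapshotProcedure pid=71 and moving snaptb0-testExportFileSystemState out of .hbase-snapshot/.tmp into the final snapshot directory. For reference, a minimal sketch of requesting the same kind of FLUSH snapshot through the public Admin API; the table and snapshot names are copied from the log, while the class name and connection setup are illustrative and not the code TestExportSnapshot actually runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side snapshot procedure (pid=71 above) completes.
      admin.snapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}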
2024-12-10T14:56:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:56:01,078 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-10T14:56:01,078 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078 2024-12-10T14:56:01,078 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:01,114 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:01,114 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-10T14:56:01,117 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
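The tgtFsUri/srcFsUri and inputFs/outputRoot entries above show ExportSnapshot being pointed at the export-test directory before the manifest copy and MapReduce phase. A hedged sketch of driving the same tool programmatically (the test harness wires it up differently); the -copy-to path is taken from the log, the class name and mapper count are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // CLI equivalent: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot snaptb0-testExportFileSystemState -copy-to <target-hdfs-uri>
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078",
        "-mappers", "1"  // illustrative mapper count
    });
    System.exit(rc);
  }
}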
2024-12-10T14:56:01,135 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-10T14:56:01,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741974_1150 (size=630) 2024-12-10T14:56:01,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741974_1150 (size=630) 2024-12-10T14:56:01,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741974_1150 (size=630) 2024-12-10T14:56:01,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741975_1151 (size=165) 2024-12-10T14:56:01,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741975_1151 (size=165) 2024-12-10T14:56:01,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741975_1151 (size=165) 2024-12-10T14:56:01,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:01,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:01,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:01,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-1300475096932276176.jar 2024-12-10T14:56:02,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-11044760984354061102.jar 2024-12-10T14:56:02,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:02,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:56:02,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:56:02,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:56:02,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:56:02,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-10T14:56:02,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:56:02,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:56:02,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:56:02,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:56:02,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:56:02,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:56:02,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:56:02,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:02,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:02,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:02,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:02,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-10T14:56:02,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:02,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741976_1152 (size=127628) 2024-12-10T14:56:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741976_1152 (size=127628) 2024-12-10T14:56:02,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741976_1152 (size=127628) 2024-12-10T14:56:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741977_1153 (size=2172101) 2024-12-10T14:56:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741977_1153 (size=2172101) 2024-12-10T14:56:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741977_1153 (size=2172101) 2024-12-10T14:56:02,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741978_1154 (size=6350861) 2024-12-10T14:56:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741978_1154 (size=6350861) 2024-12-10T14:56:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741978_1154 (size=6350861) 2024-12-10T14:56:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741979_1155 (size=213228) 2024-12-10T14:56:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741979_1155 (size=213228) 2024-12-10T14:56:02,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741979_1155 (size=213228) 2024-12-10T14:56:02,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741980_1156 (size=1877034) 2024-12-10T14:56:02,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741980_1156 (size=1877034) 2024-12-10T14:56:02,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741980_1156 (size=1877034) 2024-12-10T14:56:02,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741981_1157 (size=533455) 
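The long run of TableMapReduceUtil(923) "For class X, using jar Y" entries above is the dependency-jar resolution step of the export's MapReduce job: each required class is mapped to the jar that contains it, and the addStoredBlock entries that follow are those jars being written into HDFS for the job's distributed cache. A sketch of the same call when submitting one's own HBase MapReduce job; the job name is illustrative, and a real job still needs mapper, input and output configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-like-job");  // illustrative job name
    // Resolves the jar for each needed HBase/Hadoop class and adds it to the job's
    // distributed cache; with DEBUG logging enabled this emits the
    // "For class ..., using jar ..." lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
    // Mapper, InputFormat and OutputFormat setup would go here before submitting the job.
  }
}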
2024-12-10T14:56:02,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741981_1157 (size=533455) 2024-12-10T14:56:02,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741981_1157 (size=533455) 2024-12-10T14:56:02,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-10T14:56:02,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-10T14:56:02,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-10T14:56:02,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-10T14:56:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741982_1158 (size=7280644) 2024-12-10T14:56:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741982_1158 (size=7280644) 2024-12-10T14:56:02,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741982_1158 (size=7280644) 2024-12-10T14:56:02,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741983_1159 (size=4188619) 2024-12-10T14:56:02,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741983_1159 (size=4188619) 2024-12-10T14:56:02,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741983_1159 (size=4188619) 2024-12-10T14:56:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741984_1160 (size=20406) 2024-12-10T14:56:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741984_1160 (size=20406) 2024-12-10T14:56:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741984_1160 (size=20406) 2024-12-10T14:56:02,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741985_1161 (size=75495) 2024-12-10T14:56:02,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741985_1161 (size=75495) 2024-12-10T14:56:02,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741985_1161 (size=75495) 2024-12-10T14:56:02,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to 
blk_1073741986_1162 (size=45609) 2024-12-10T14:56:02,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741986_1162 (size=45609) 2024-12-10T14:56:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741986_1162 (size=45609) 2024-12-10T14:56:02,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741987_1163 (size=110084) 2024-12-10T14:56:02,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741987_1163 (size=110084) 2024-12-10T14:56:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741987_1163 (size=110084) 2024-12-10T14:56:02,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741988_1164 (size=1323991) 2024-12-10T14:56:02,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741988_1164 (size=1323991) 2024-12-10T14:56:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741988_1164 (size=1323991) 2024-12-10T14:56:02,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741989_1165 (size=23076) 2024-12-10T14:56:02,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741989_1165 (size=23076) 2024-12-10T14:56:02,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741989_1165 (size=23076) 2024-12-10T14:56:02,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741990_1166 (size=451756) 2024-12-10T14:56:02,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741990_1166 (size=451756) 2024-12-10T14:56:02,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741990_1166 (size=451756) 2024-12-10T14:56:02,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741991_1167 (size=126803) 2024-12-10T14:56:02,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741991_1167 (size=126803) 2024-12-10T14:56:02,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741991_1167 (size=126803) 2024-12-10T14:56:02,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741992_1168 (size=322274) 2024-12-10T14:56:02,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741992_1168 (size=322274) 2024-12-10T14:56:02,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added 
to blk_1073741992_1168 (size=322274) 2024-12-10T14:56:02,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741993_1169 (size=1832290) 2024-12-10T14:56:02,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741993_1169 (size=1832290) 2024-12-10T14:56:02,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741993_1169 (size=1832290) 2024-12-10T14:56:02,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741994_1170 (size=30081) 2024-12-10T14:56:02,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741994_1170 (size=30081) 2024-12-10T14:56:02,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741994_1170 (size=30081) 2024-12-10T14:56:02,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741995_1171 (size=53616) 2024-12-10T14:56:02,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741995_1171 (size=53616) 2024-12-10T14:56:02,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741995_1171 (size=53616) 2024-12-10T14:56:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741996_1172 (size=29229) 2024-12-10T14:56:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741996_1172 (size=29229) 2024-12-10T14:56:02,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741996_1172 (size=29229) 2024-12-10T14:56:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741997_1173 (size=169089) 2024-12-10T14:56:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741997_1173 (size=169089) 2024-12-10T14:56:02,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741997_1173 (size=169089) 2024-12-10T14:56:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741998_1174 (size=5175431) 2024-12-10T14:56:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741998_1174 (size=5175431) 2024-12-10T14:56:02,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741998_1174 (size=5175431) 2024-12-10T14:56:02,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741999_1175 (size=136454) 2024-12-10T14:56:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is 
added to blk_1073741999_1175 (size=136454) 2024-12-10T14:56:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741999_1175 (size=136454) 2024-12-10T14:56:02,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742000_1176 (size=907858) 2024-12-10T14:56:02,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742000_1176 (size=907858) 2024-12-10T14:56:02,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742000_1176 (size=907858) 2024-12-10T14:56:02,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742001_1177 (size=3317408) 2024-12-10T14:56:02,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742001_1177 (size=3317408) 2024-12-10T14:56:02,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742001_1177 (size=3317408) 2024-12-10T14:56:02,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742002_1178 (size=503880) 2024-12-10T14:56:02,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742002_1178 (size=503880) 2024-12-10T14:56:02,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742002_1178 (size=503880) 2024-12-10T14:56:02,923 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0002_000001 (auth:SIMPLE) from 127.0.0.1:40538 2024-12-10T14:56:02,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000001/launch_container.sh] 2024-12-10T14:56:02,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000001/container_tokens] 2024-12-10T14:56:02,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0002/container_1733842510486_0002_01_000001/sysfs] 2024-12-10T14:56:02,948 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742003_1179 (size=4695811) 2024-12-10T14:56:02,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742003_1179 (size=4695811) 2024-12-10T14:56:02,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742003_1179 (size=4695811) 2024-12-10T14:56:02,953 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-10T14:56:02,956 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-10T14:56:02,958 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:56:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742004_1180 (size=344) 2024-12-10T14:56:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742004_1180 (size=344) 2024-12-10T14:56:02,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742004_1180 (size=344) 2024-12-10T14:56:02,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742005_1181 (size=15) 2024-12-10T14:56:02,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742005_1181 (size=15) 2024-12-10T14:56:02,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742005_1181 (size=15) 2024-12-10T14:56:02,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742006_1182 (size=304891) 2024-12-10T14:56:02,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742006_1182 (size=304891) 2024-12-10T14:56:02,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742006_1182 (size=304891) 2024-12-10T14:56:03,020 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:56:03,020 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:56:03,072 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0003_000001 (auth:SIMPLE) from 127.0.0.1:51518 2024-12-10T14:56:04,197 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:56:06,006 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d8b1ebedae14d6d5b339ab8f1bb779d7 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:56:06,007 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 99bb2446a6b84f36bf016b19fe4722e6 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:56:06,008 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region fa5bef8ce85335a6dd0147f15cb9e220 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:56:06,008 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 763faff5dd874fa35c8dd37f5437c5e6 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:56:09,217 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0003_000001 (auth:SIMPLE) from 127.0.0.1:43626 2024-12-10T14:56:09,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742007_1183 (size=350565) 2024-12-10T14:56:09,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742007_1183 (size=350565) 2024-12-10T14:56:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742007_1183 (size=350565) 2024-12-10T14:56:11,580 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0003_000001 (auth:SIMPLE) from 127.0.0.1:55496 2024-12-10T14:56:15,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742008_1184 (size=8188) 2024-12-10T14:56:15,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742008_1184 (size=8188) 2024-12-10T14:56:15,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742008_1184 (size=8188) 2024-12-10T14:56:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742009_1185 (size=5422) 2024-12-10T14:56:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742009_1185 (size=5422) 2024-12-10T14:56:15,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742009_1185 (size=5422) 2024-12-10T14:56:15,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742010_1186 (size=17422) 2024-12-10T14:56:15,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742010_1186 (size=17422) 2024-12-10T14:56:15,753 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742010_1186 (size=17422) 2024-12-10T14:56:15,806 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000002/launch_container.sh] 2024-12-10T14:56:15,806 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000002/container_tokens] 2024-12-10T14:56:15,806 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000002/sysfs] 2024-12-10T14:56:16,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742011_1187 (size=465) 2024-12-10T14:56:16,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742011_1187 (size=465) 2024-12-10T14:56:16,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742011_1187 (size=465) 2024-12-10T14:56:16,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742012_1188 (size=17422) 2024-12-10T14:56:16,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742012_1188 (size=17422) 2024-12-10T14:56:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742012_1188 (size=17422) 2024-12-10T14:56:16,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742013_1189 (size=350565) 2024-12-10T14:56:16,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742013_1189 (size=350565) 2024-12-10T14:56:16,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742013_1189 (size=350565) 2024-12-10T14:56:16,322 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0003_000001 (auth:SIMPLE) from 127.0.0.1:55512 2024-12-10T14:56:18,163 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 
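"Finalize the Snapshot Export" above is followed by an integrity check and a listing of .snapshotinfo and data.manifest under both the source and the exported .hbase-snapshot directories. A small sketch of repeating that layout check by hand with the Hadoop FileSystem API; the NameNode URI and export path are copied from the log, the class name is illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyExportLayoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36449"), conf);
    Path exportedSnapshotDir = new Path(
        "/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/"
        + "export-1733842561078/.hbase-snapshot/snaptb0-testExportFileSystemState");
    // Expect exactly the two files the log lists: .snapshotinfo and data.manifest.
    for (FileStatus status : fs.listStatus(exportedSnapshotDir)) {
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}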
2024-12-10T14:56:18,165 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-10T14:56:18,183 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-10T14:56:18,183 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:56:18,184 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:56:18,184 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-10T14:56:18,185 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-10T14:56:18,185 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-10T14:56:18,185 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-10T14:56:18,186 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-10T14:56:18,186 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842561078/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-10T14:56:18,192 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-10T14:56:18,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-10T14:56:18,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-10T14:56:18,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842578196"}]},"ts":"1733842578196"} 2024-12-10T14:56:18,197 INFO [PEWorker-3 {}] 
hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-10T14:56:18,229 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-10T14:56:18,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-10T14:56:18,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, UNASSIGN}] 2024-12-10T14:56:18,231 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, UNASSIGN 2024-12-10T14:56:18,232 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, UNASSIGN 2024-12-10T14:56:18,232 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=d8b1ebedae14d6d5b339ab8f1bb779d7, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:18,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=fa5bef8ce85335a6dd0147f15cb9e220, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:18,234 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:18,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:18,234 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:18,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:56:18,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-10T14:56:18,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:18,386 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:18,386 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:18,386 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing fa5bef8ce85335a6dd0147f15cb9e220, disabling compactions & flushes 2024-12-10T14:56:18,387 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. after waiting 0 ms 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:18,387 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing d8b1ebedae14d6d5b339ab8f1bb779d7, disabling compactions & flushes 2024-12-10T14:56:18,387 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. after waiting 0 ms 2024-12-10T14:56:18,387 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 
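The region closes above are the region-server side of DisableTableProcedure pid=74; in the entries that follow, the table is deleted (pid=80) and its store files are moved into the archive directory. A hedged sketch of the equivalent public client calls; the deleteSnapshot line is an extra illustration, since this part of the log only disables and deletes the table.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // DisableTableProcedure: unassigns the regions first
      admin.deleteTable(table);    // DeleteTableProcedure: store files end up under the archive dir
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");  // optional cleanup, not shown in the log
    }
  }
}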
2024-12-10T14:56:18,400 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:56:18,400 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:56:18,401 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:18,401 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:18,401 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220. 2024-12-10T14:56:18,401 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7. 2024-12-10T14:56:18,401 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for fa5bef8ce85335a6dd0147f15cb9e220: 2024-12-10T14:56:18,401 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for d8b1ebedae14d6d5b339ab8f1bb779d7: 2024-12-10T14:56:18,404 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=d8b1ebedae14d6d5b339ab8f1bb779d7, regionState=CLOSED 2024-12-10T14:56:18,405 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:18,405 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:18,405 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=fa5bef8ce85335a6dd0147f15cb9e220, regionState=CLOSED 2024-12-10T14:56:18,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-10T14:56:18,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure d8b1ebedae14d6d5b339ab8f1bb779d7, server=db1d50717577,37707,1733842502956 in 172 msec 2024-12-10T14:56:18,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-10T14:56:18,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure fa5bef8ce85335a6dd0147f15cb9e220, server=db1d50717577,41775,1733842502887 in 173 msec 2024-12-10T14:56:18,411 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d8b1ebedae14d6d5b339ab8f1bb779d7, UNASSIGN in 178 msec 2024-12-10T14:56:18,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=77, resume processing ppid=75 2024-12-10T14:56:18,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fa5bef8ce85335a6dd0147f15cb9e220, UNASSIGN in 178 msec 2024-12-10T14:56:18,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-10T14:56:18,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 184 msec 2024-12-10T14:56:18,417 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842578417"}]},"ts":"1733842578417"} 2024-12-10T14:56:18,419 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-10T14:56:18,452 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-10T14:56:18,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 260 msec 2024-12-10T14:56:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-10T14:56:18,499 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-10T14:56:18,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-10T14:56:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,504 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-10T14:56:18,504 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-10T14:56:18,510 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:18,510 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:18,514 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/recovered.edits] 2024-12-10T14:56:18,514 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/recovered.edits] 2024-12-10T14:56:18,520 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/cf/a613c9d39b3b472c9a16f69a03f4aeae 2024-12-10T14:56:18,520 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/cf/56959a52bcd34f9bbb820d21225ec0f5 2024-12-10T14:56:18,523 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7/recovered.edits/9.seqid 2024-12-10T14:56:18,524 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/d8b1ebedae14d6d5b339ab8f1bb779d7 2024-12-10T14:56:18,524 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/recovered.edits/9.seqid to 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220/recovered.edits/9.seqid 2024-12-10T14:56:18,525 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemState/fa5bef8ce85335a6dd0147f15cb9e220 2024-12-10T14:56:18,525 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-10T14:56:18,527 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,531 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-10T14:56:18,537 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-10T14:56:18,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-10T14:56:18,539 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842578538"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:18,539 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842578538"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:18,543 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:56:18,543 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d8b1ebedae14d6d5b339ab8f1bb779d7, NAME => 'testtb-testExportFileSystemState,,1733842559421.d8b1ebedae14d6d5b339ab8f1bb779d7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fa5bef8ce85335a6dd0147f15cb9e220, NAME => 'testtb-testExportFileSystemState,1,1733842559421.fa5bef8ce85335a6dd0147f15cb9e220.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:56:18,543 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 
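The HFileArchiver entries above correspond to DELETE_TABLE_CLEAR_FS_LAYOUT: region directories are moved under the archive root rather than deleted outright, after which the regions are dropped from hbase:meta and the descriptor is removed. On the client side, the teardown that produced pid=80, together with the snapshot deletions that appear a little further down in the log, reduces to a few Admin calls; a sketch under the same connection assumptions as the previous example:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // DeleteTableProcedure (pid=80): archives the region dirs, removes the region rows
      // from hbase:meta and drops the table descriptor. The table must already be disabled.
      admin.deleteTable(table);
      // The two snapshots named later in the log are removed separately.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}
```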
2024-12-10T14:56:18,544 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842578543"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:18,548 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-10T14:56:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-10T14:56:18,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-10T14:56:18,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-10T14:56:18,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-10T14:56:18,615 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-10T14:56:18,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 115 msec 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,652 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-10T14:56:18,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:18,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-10T14:56:18,654 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-10T14:56:18,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-10T14:56:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-10T14:56:18,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-10T14:56:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-10T14:56:18,722 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=798 (was 803), OpenFileDescriptor=817 (was 815) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 481) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=2064 (was 2141) 2024-12-10T14:56:18,722 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-10T14:56:18,738 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=798, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=17, AvailableMemoryMB=2074 2024-12-10T14:56:18,738 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-10T14:56:18,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:56:18,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:18,742 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:56:18,742 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:18,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-10T14:56:18,743 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:56:18,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:56:18,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742014_1190 (size=404) 2024-12-10T14:56:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742014_1190 (size=404) 2024-12-10T14:56:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742014_1190 (size=404) 2024-12-10T14:56:18,752 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 3544e35340615a556e248ecbe6a8f3f1, NAME => 'testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:18,752 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9c689fc20e3215e22342ca57113b11b8, NAME => 'testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742015_1191 (size=65) 2024-12-10T14:56:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742016_1192 (size=65) 2024-12-10T14:56:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742016_1192 (size=65) 2024-12-10T14:56:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742015_1191 (size=65) 2024-12-10T14:56:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742015_1191 (size=65) 2024-12-10T14:56:18,763 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:18,763 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 9c689fc20e3215e22342ca57113b11b8, disabling compactions & flushes 2024-12-10T14:56:18,763 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742016_1192 (size=65) 2024-12-10T14:56:18,763 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:18,763 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 
after waiting 0 ms 2024-12-10T14:56:18,763 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:18,763 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9c689fc20e3215e22342ca57113b11b8: 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing 3544e35340615a556e248ecbe6a8f3f1, disabling compactions & flushes 2024-12-10T14:56:18,764 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. after waiting 0 ms 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:18,764 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:18,764 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for 3544e35340615a556e248ecbe6a8f3f1: 2024-12-10T14:56:18,765 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:56:18,765 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842578765"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842578765"}]},"ts":"1733842578765"} 2024-12-10T14:56:18,765 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842578765"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842578765"}]},"ts":"1733842578765"} 2024-12-10T14:56:18,768 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
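pid=81 walks the usual CreateTableProcedure states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ...). The schema logged for 'testtb-testConsecutiveExports' is a single family 'cf' with one version and default encodings, pre-split at row key '1', which is why two regions (9c689fc2... and 3544e353...) are initialized. A hedged sketch of an equivalent client-side request, leaving everything the log shows at default values unset:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)          // VERSIONS => '1' in the logged schema
            .build())
        .build();                       // REGION_REPLICATION defaults to 1, as logged
    byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions [,1) and [1,)
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc, splitKeys); // blocks until the CREATE_TABLE_* states complete
    }
  }
}
```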
2024-12-10T14:56:18,769 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:56:18,769 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842578769"}]},"ts":"1733842578769"} 2024-12-10T14:56:18,770 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-10T14:56:18,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:56:18,838 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:56:18,838 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:56:18,838 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:56:18,838 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:56:18,838 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:56:18,838 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:56:18,838 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:56:18,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, ASSIGN}] 2024-12-10T14:56:18,839 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, ASSIGN 2024-12-10T14:56:18,839 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, ASSIGN 2024-12-10T14:56:18,840 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:56:18,841 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:56:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=81 2024-12-10T14:56:18,990 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:56:18,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=3544e35340615a556e248ecbe6a8f3f1, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:18,991 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=9c689fc20e3215e22342ca57113b11b8, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:18,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure 3544e35340615a556e248ecbe6a8f3f1, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:56:18,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure 9c689fc20e3215e22342ca57113b11b8, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:19,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:56:19,144 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:19,145 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:19,147 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:19,147 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 3544e35340615a556e248ecbe6a8f3f1, NAME => 'testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:56:19,148 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. service=AccessControlService 2024-12-10T14:56:19,148 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,148 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 9c689fc20e3215e22342ca57113b11b8, NAME => 'testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:56:19,148 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
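Each region open above re-registers the AccessControlService coprocessor endpoint ("System coprocessor ... AccessController loaded"), which is what later drives the PermissionStorage and ZKPermissionWatcher traffic. That coprocessor is not active by default; in a secured cluster it is typically wired in through configuration roughly as sketched below. The exact set of keys a deployment needs depends on its security setup, so treat this purely as an illustration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Load the AccessController on the master, the region servers and every region,
    // and turn authorization checks on.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    System.out.println("authorization enabled: "
        + conf.getBoolean("hbase.security.authorization", false));
  }
}
```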
2024-12-10T14:56:19,148 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,148 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. service=AccessControlService 2024-12-10T14:56:19,149 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,149 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,150 INFO [StoreOpener-3544e35340615a556e248ecbe6a8f3f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,151 INFO [StoreOpener-9c689fc20e3215e22342ca57113b11b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,152 INFO [StoreOpener-3544e35340615a556e248ecbe6a8f3f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3544e35340615a556e248ecbe6a8f3f1 columnFamilyName cf 2024-12-10T14:56:19,152 DEBUG [StoreOpener-3544e35340615a556e248ecbe6a8f3f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:19,152 INFO [StoreOpener-9c689fc20e3215e22342ca57113b11b8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c689fc20e3215e22342ca57113b11b8 columnFamilyName cf 2024-12-10T14:56:19,152 DEBUG [StoreOpener-9c689fc20e3215e22342ca57113b11b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:19,153 INFO [StoreOpener-3544e35340615a556e248ecbe6a8f3f1-1 {}] regionserver.HStore(327): Store=3544e35340615a556e248ecbe6a8f3f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:19,153 INFO [StoreOpener-9c689fc20e3215e22342ca57113b11b8-1 {}] regionserver.HStore(327): Store=9c689fc20e3215e22342ca57113b11b8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:19,154 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,154 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,154 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,154 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,156 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,156 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,158 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:19,158 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:19,159 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 3544e35340615a556e248ecbe6a8f3f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69478656, jitterRate=0.035312652587890625}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:19,159 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 9c689fc20e3215e22342ca57113b11b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59185598, jitterRate=-0.11806586384773254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:19,160 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 9c689fc20e3215e22342ca57113b11b8: 2024-12-10T14:56:19,160 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for 3544e35340615a556e248ecbe6a8f3f1: 2024-12-10T14:56:19,161 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8., pid=85, masterSystemTime=1733842579145 2024-12-10T14:56:19,161 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1., pid=84, masterSystemTime=1733842579144 2024-12-10T14:56:19,162 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 
2024-12-10T14:56:19,162 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,163 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=9c689fc20e3215e22342ca57113b11b8, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:19,163 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:19,163 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:19,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=3544e35340615a556e248ecbe6a8f3f1, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:19,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82 2024-12-10T14:56:19,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure 9c689fc20e3215e22342ca57113b11b8, server=db1d50717577,41775,1733842502887 in 171 msec 2024-12-10T14:56:19,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-10T14:56:19,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure 3544e35340615a556e248ecbe6a8f3f1, server=db1d50717577,37707,1733842502956 in 173 msec 2024-12-10T14:56:19,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, ASSIGN in 328 msec 2024-12-10T14:56:19,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=81 2024-12-10T14:56:19,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, ASSIGN in 329 msec 2024-12-10T14:56:19,169 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:56:19,170 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842579169"}]},"ts":"1733842579169"} 2024-12-10T14:56:19,171 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-10T14:56:19,183 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:56:19,183 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey 
testtb-testConsecutiveExports jenkins: RWXCA 2024-12-10T14:56:19,185 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-10T14:56:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:19,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:19,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:19,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:19,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:19,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 504 msec 2024-12-10T14:56:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:56:19,346 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-10T14:56:19,346 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-10T14:56:19,346 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:19,350 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 
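After the CREATE operation completes, the test waits until every region of the new table is assigned. Outside the test utility, a rough equivalent of that check is to ask a RegionLocator where the regions currently live; a sketch, with the same connection assumptions as before:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocations {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Each entry pairs a region (e.g. 9c689... / 3544e... above) with the server hosting it.
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```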
2024-12-10T14:56:19,350 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:19,350 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-10T14:56:19,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-10T14:56:19,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842579353 (current time:1733842579353). 2024-12-10T14:56:19,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:19,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-10T14:56:19,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x432c5e00 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@741e178 2024-12-10T14:56:19,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b9cffa3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:19,383 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x432c5e00 to 127.0.0.1:57176 2024-12-10T14:56:19,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:19,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1eb179ac to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f342e3b 2024-12-10T14:56:19,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53b9af99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:19,424 DEBUG [hconnection-0x243c1aba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-10T14:56:19,425 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:19,428 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1eb179ac to 127.0.0.1:57176 2024-12-10T14:56:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-10T14:56:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:56:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-10T14:56:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-10T14:56:19,431 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-10T14:56:19,432 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:19,434 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742017_1193 (size=161) 2024-12-10T14:56:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742017_1193 (size=161) 2024-12-10T14:56:19,441 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742017_1193 (size=161) 2024-12-10T14:56:19,442 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:19,442 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1}] 2024-12-10T14:56:19,443 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,443 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-10T14:56:19,594 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:19,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:19,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-10T14:56:19,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-10T14:56:19,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 9c689fc20e3215e22342ca57113b11b8: 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. for emptySnaptb0-testConsecutiveExports completed. 
2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 3544e35340615a556e248ecbe6a8f3f1: 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. for emptySnaptb0-testConsecutiveExports completed. 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:19,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:19,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742018_1194 (size=68) 2024-12-10T14:56:19,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742018_1194 (size=68) 2024-12-10T14:56:19,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742018_1194 (size=68) 2024-12-10T14:56:19,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 
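[editor's note] The entries above trace the master-side SnapshotProcedure (pid=86) for the FLUSH-type snapshot emptySnaptb0-testConsecutiveExports: SNAPSHOT_PREPARE through SNAPSHOT_WRITE_SNAPSHOT_INFO, then one SnapshotRegionProcedure per region (pid=87, pid=88), each of which flushes its region (a no-op here, since no hfiles exist yet) and stores region-info in the snapshot manifest. For orientation, a minimal client-side sketch of the call that produces such a trace follows, assuming the standard HBase 2.x Admin API; the connection boilerplate is illustrative and not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request logged above.
      // Snapshot and table names mirror the log; everything else is illustrative.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH));
    }
  }
}

This form of Admin.snapshot() blocks until the master reports the procedure finished, which is why the log shows the same RPC handler repeatedly asking "Checking to see if procedure is done pid=86".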
2024-12-10T14:56:19,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-10T14:56:19,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742019_1195 (size=68) 2024-12-10T14:56:19,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-10T14:56:19,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742019_1195 (size=68) 2024-12-10T14:56:19,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742019_1195 (size=68) 2024-12-10T14:56:19,605 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-10T14:56:19,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-10T14:56:19,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,606 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 in 164 msec 2024-12-10T14:56:19,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-10T14:56:19,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 in 165 msec 2024-12-10T14:56:19,608 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:19,609 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:19,609 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:19,610 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:19,610 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:19,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742020_1196 (size=543) 2024-12-10T14:56:19,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742020_1196 (size=543) 2024-12-10T14:56:19,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742020_1196 (size=543) 2024-12-10T14:56:19,624 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:19,629 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:19,630 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:19,631 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:19,631 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-10T14:56:19,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 201 msec 2024-12-10T14:56:19,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-10T14:56:19,734 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-10T14:56:19,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:19,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:19,746 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-10T14:56:19,746 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,746 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:19,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-10T14:56:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842579758 (current time:1733842579758). 2024-12-10T14:56:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-10T14:56:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31d57960 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75f85c2a 2024-12-10T14:56:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6feb7026, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:19,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:19,764 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31d57960 to 127.0.0.1:57176 2024-12-10T14:56:19,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:19,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ccbc752 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1be5d7ed 2024-12-10T14:56:19,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f3bfef7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:19,772 DEBUG [hconnection-0x640bb634-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:19,773 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:19,775 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ccbc752 to 127.0.0.1:57176 2024-12-10T14:56:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-10T14:56:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
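[editor's note] The two HRegion(8254) warnings shortly before this second snapshot request ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") correspond to client puts issued with durability SKIP_WAL, which the test uses to seed rows into column family cf, qualifier q, before taking snaptb0-testConsecutiveExports. A minimal sketch of such a load, assuming the standard Table/Put client API; the row keys, values, and row count are illustrative.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadRowsSkipWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 50; i++) {                      // row count is illustrative
        Put p = new Put(Bytes.toBytes(String.format("row-%02d", i)));
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        p.setDurability(Durability.SKIP_WAL);             // triggers the "WAL disabled" warning above
        puts.add(p);
      }
      table.put(puts);   // rows live only in the memstore until the snapshot-triggered flush
    }
  }
}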
2024-12-10T14:56:19,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-10T14:56:19,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-10T14:56:19,782 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:19,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:56:19,783 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:19,786 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:19,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742021_1197 (size=156) 2024-12-10T14:56:19,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742021_1197 (size=156) 2024-12-10T14:56:19,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742021_1197 (size=156) 2024-12-10T14:56:19,796 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:19,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1}] 2024-12-10T14:56:19,797 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:19,797 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:19,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 
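[editor's note] The repeated MasterRpcServices(1305) "Checking to see if procedure is done pid=89" entries are the client polling the master until the snapshot procedure completes. A rough sketch of an equivalent explicit polling loop, assuming the Admin.snapshotAsync/isSnapshotFinished methods of the 2.x client API; the sleep interval is arbitrary and this is not necessarily the exact code path the test exercises.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotCompletion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    SnapshotDescription snapshot = new SnapshotDescription(
        "snaptb0-testConsecutiveExports",                    // name taken from the log; the rest is illustrative
        TableName.valueOf("testtb-testConsecutiveExports"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(snapshot);                         // returns once the procedure is submitted
      while (!admin.isSnapshotFinished(snapshot)) {          // analogous to the repeated pid=89 checks above
        Thread.sleep(100);
      }
    }
  }
}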
2024-12-10T14:56:19,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:19,948 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:19,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-10T14:56:19,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-10T14:56:19,949 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:19,949 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:19,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 9c689fc20e3215e22342ca57113b11b8 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-10T14:56:19,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 3544e35340615a556e248ecbe6a8f3f1 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-10T14:56:19,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/.tmp/cf/595416239e6a4a4c9e1b3abe89681e60 is 71, key is 0602e34a27cfb0dfbca12eab72d54212/cf:q/1733842579741/Put/seqid=0 2024-12-10T14:56:19,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/.tmp/cf/5924360b3a1c49879578e35862929e4f is 71, key is 10de4ff99445e2366b1847f9e931806d/cf:q/1733842579743/Put/seqid=0 2024-12-10T14:56:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742022_1198 (size=8324) 2024-12-10T14:56:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742022_1198 (size=8324) 2024-12-10T14:56:19,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742022_1198 (size=8324) 2024-12-10T14:56:19,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/.tmp/cf/5924360b3a1c49879578e35862929e4f 2024-12-10T14:56:19,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742023_1199 (size=5288) 2024-12-10T14:56:19,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742023_1199 (size=5288) 2024-12-10T14:56:19,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742023_1199 (size=5288) 2024-12-10T14:56:19,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/.tmp/cf/595416239e6a4a4c9e1b3abe89681e60 2024-12-10T14:56:19,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/.tmp/cf/5924360b3a1c49879578e35862929e4f as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f 2024-12-10T14:56:20,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/.tmp/cf/595416239e6a4a4c9e1b3abe89681e60 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60 2024-12-10T14:56:20,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f, entries=47, sequenceid=6, filesize=8.1 K 2024-12-10T14:56:20,007 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 3544e35340615a556e248ecbe6a8f3f1 in 58ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:20,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-10T14:56:20,008 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60, entries=3, sequenceid=6, filesize=5.2 K 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 3544e35340615a556e248ecbe6a8f3f1: 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. for snaptb0-testConsecutiveExports completed. 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f] hfiles 2024-12-10T14:56:20,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f for snapshot=snaptb0-testConsecutiveExports 2024-12-10T14:56:20,009 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 9c689fc20e3215e22342ca57113b11b8 in 60ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 9c689fc20e3215e22342ca57113b11b8: 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. for snaptb0-testConsecutiveExports completed. 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60] hfiles 2024-12-10T14:56:20,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60 for snapshot=snaptb0-testConsecutiveExports 2024-12-10T14:56:20,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742024_1200 (size=107) 2024-12-10T14:56:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742024_1200 (size=107) 2024-12-10T14:56:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742024_1200 (size=107) 2024-12-10T14:56:20,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 
2024-12-10T14:56:20,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-10T14:56:20,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-10T14:56:20,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:20,029 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:20,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure 3544e35340615a556e248ecbe6a8f3f1 in 234 msec 2024-12-10T14:56:20,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742025_1201 (size=107) 2024-12-10T14:56:20,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742025_1201 (size=107) 2024-12-10T14:56:20,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742025_1201 (size=107) 2024-12-10T14:56:20,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:20,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-10T14:56:20,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-10T14:56:20,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:20,047 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:20,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-10T14:56:20,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 9c689fc20e3215e22342ca57113b11b8 in 252 msec 2024-12-10T14:56:20,051 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:20,051 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:20,052 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:20,052 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-10T14:56:20,053 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-10T14:56:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742026_1202 (size=621) 2024-12-10T14:56:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742026_1202 (size=621) 2024-12-10T14:56:20,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742026_1202 (size=621) 2024-12-10T14:56:20,070 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:20,076 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:20,076 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-10T14:56:20,078 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:20,078 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-10T14:56:20,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 301 msec 2024-12-10T14:56:20,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:56:20,085 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table 
Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-10T14:56:20,085 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085 2024-12-10T14:56:20,085 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:20,123 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:20,124 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@1a7f087e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-10T14:56:20,126 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
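[editor's note] From here the log switches to ExportSnapshot: it verifies the source snapshot, copies the snapshot manifest from HDFS to the local file:// destination, and then TableMapReduceUtil resolves the dependency jars for the MapReduce copy job. A minimal sketch of driving the same tool programmatically via ToolRunner, using the documented --snapshot/--copy-to options; the destination path is hypothetical and the test harness itself invokes the tool differently.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotLocally {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       --snapshot snaptb0-testConsecutiveExports --copy-to file:///tmp/local-export
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export"              // destination path is illustrative
    });
    System.exit(rc);
  }
}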
2024-12-10T14:56:20,130 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-10T14:56:20,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:20,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:20,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:20,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-7247117878572872487.jar 2024-12-10T14:56:21,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-1230031860173766853.jar 2024-12-10T14:56:21,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:21,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:56:21,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:56:21,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:56:21,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:56:21,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:56:21,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:56:21,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:56:21,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:56:21,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:56:21,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:56:21,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:56:21,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:56:21,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:21,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:21,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:21,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:21,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:21,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:21,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:21,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742027_1203 (size=127628) 2024-12-10T14:56:21,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34057 is added to blk_1073742027_1203 (size=127628) 2024-12-10T14:56:21,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742027_1203 (size=127628) 2024-12-10T14:56:21,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742028_1204 (size=2172101) 2024-12-10T14:56:21,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742028_1204 (size=2172101) 2024-12-10T14:56:21,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742028_1204 (size=2172101) 2024-12-10T14:56:21,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742029_1205 (size=213228) 2024-12-10T14:56:21,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742029_1205 (size=213228) 2024-12-10T14:56:21,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742029_1205 (size=213228) 2024-12-10T14:56:21,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742030_1206 (size=1877034) 2024-12-10T14:56:21,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742030_1206 (size=1877034) 2024-12-10T14:56:21,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742030_1206 (size=1877034) 2024-12-10T14:56:21,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742031_1207 (size=451756) 2024-12-10T14:56:21,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742031_1207 (size=451756) 2024-12-10T14:56:21,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742031_1207 (size=451756) 2024-12-10T14:56:21,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742032_1208 (size=6350861) 2024-12-10T14:56:21,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742032_1208 (size=6350861) 2024-12-10T14:56:21,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742032_1208 (size=6350861) 2024-12-10T14:56:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742033_1209 (size=533455) 2024-12-10T14:56:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742033_1209 (size=533455) 2024-12-10T14:56:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742033_1209 (size=533455) 2024-12-10T14:56:21,484 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742034_1210 (size=7280644) 2024-12-10T14:56:21,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742034_1210 (size=7280644) 2024-12-10T14:56:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742034_1210 (size=7280644) 2024-12-10T14:56:21,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742035_1211 (size=4188619) 2024-12-10T14:56:21,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742035_1211 (size=4188619) 2024-12-10T14:56:21,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742035_1211 (size=4188619) 2024-12-10T14:56:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742036_1212 (size=20406) 2024-12-10T14:56:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742036_1212 (size=20406) 2024-12-10T14:56:21,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742036_1212 (size=20406) 2024-12-10T14:56:21,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742037_1213 (size=75495) 2024-12-10T14:56:21,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742037_1213 (size=75495) 2024-12-10T14:56:21,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742037_1213 (size=75495) 2024-12-10T14:56:21,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742038_1214 (size=45609) 2024-12-10T14:56:21,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742038_1214 (size=45609) 2024-12-10T14:56:21,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742038_1214 (size=45609) 2024-12-10T14:56:21,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742039_1215 (size=110084) 2024-12-10T14:56:21,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742039_1215 (size=110084) 2024-12-10T14:56:21,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742039_1215 (size=110084) 2024-12-10T14:56:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742040_1216 (size=1323991) 2024-12-10T14:56:21,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742040_1216 (size=1323991) 2024-12-10T14:56:21,992 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742040_1216 (size=1323991) 2024-12-10T14:56:22,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742041_1217 (size=23076) 2024-12-10T14:56:22,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742041_1217 (size=23076) 2024-12-10T14:56:22,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742041_1217 (size=23076) 2024-12-10T14:56:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742042_1218 (size=126803) 2024-12-10T14:56:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742042_1218 (size=126803) 2024-12-10T14:56:22,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742042_1218 (size=126803) 2024-12-10T14:56:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742043_1219 (size=322274) 2024-12-10T14:56:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742043_1219 (size=322274) 2024-12-10T14:56:22,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742043_1219 (size=322274) 2024-12-10T14:56:22,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742044_1220 (size=1832290) 2024-12-10T14:56:22,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742044_1220 (size=1832290) 2024-12-10T14:56:22,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742044_1220 (size=1832290) 2024-12-10T14:56:22,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742045_1221 (size=30081) 2024-12-10T14:56:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742045_1221 (size=30081) 2024-12-10T14:56:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742045_1221 (size=30081) 2024-12-10T14:56:22,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742046_1222 (size=53616) 2024-12-10T14:56:22,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742046_1222 (size=53616) 2024-12-10T14:56:22,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742046_1222 (size=53616) 2024-12-10T14:56:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742047_1223 (size=29229) 2024-12-10T14:56:22,082 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742047_1223 (size=29229) 2024-12-10T14:56:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742047_1223 (size=29229) 2024-12-10T14:56:22,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742048_1224 (size=169089) 2024-12-10T14:56:22,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742048_1224 (size=169089) 2024-12-10T14:56:22,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742048_1224 (size=169089) 2024-12-10T14:56:22,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742049_1225 (size=5175431) 2024-12-10T14:56:22,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742049_1225 (size=5175431) 2024-12-10T14:56:22,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742049_1225 (size=5175431) 2024-12-10T14:56:22,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742050_1226 (size=136454) 2024-12-10T14:56:22,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742050_1226 (size=136454) 2024-12-10T14:56:22,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742050_1226 (size=136454) 2024-12-10T14:56:22,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742051_1227 (size=907858) 2024-12-10T14:56:22,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742051_1227 (size=907858) 2024-12-10T14:56:22,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742051_1227 (size=907858) 2024-12-10T14:56:22,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742052_1228 (size=3317408) 2024-12-10T14:56:22,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742052_1228 (size=3317408) 2024-12-10T14:56:22,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742052_1228 (size=3317408) 2024-12-10T14:56:22,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742053_1229 (size=503880) 2024-12-10T14:56:22,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742053_1229 (size=503880) 2024-12-10T14:56:22,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742053_1229 (size=503880) 2024-12-10T14:56:22,186 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742054_1230 (size=4695811) 2024-12-10T14:56:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742054_1230 (size=4695811) 2024-12-10T14:56:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742054_1230 (size=4695811) 2024-12-10T14:56:22,188 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-10T14:56:22,191 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-10T14:56:22,193 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:56:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742055_1231 (size=338) 2024-12-10T14:56:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742055_1231 (size=338) 2024-12-10T14:56:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742055_1231 (size=338) 2024-12-10T14:56:22,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742056_1232 (size=15) 2024-12-10T14:56:22,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742056_1232 (size=15) 2024-12-10T14:56:22,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742056_1232 (size=15) 2024-12-10T14:56:22,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742057_1233 (size=304930) 2024-12-10T14:56:22,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742057_1233 (size=304930) 2024-12-10T14:56:22,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742057_1233 (size=304930) 2024-12-10T14:56:22,399 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:56:22,399 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:56:22,403 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0003_000001 (auth:SIMPLE) from 127.0.0.1:57472 2024-12-10T14:56:22,414 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000001/launch_container.sh] 2024-12-10T14:56:22,414 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000001/container_tokens] 2024-12-10T14:56:22,415 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0003/container_1733842510486_0003_01_000001/sysfs] 2024-12-10T14:56:22,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-10T14:56:22,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-10T14:56:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-10T14:56:22,801 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0004_000001 (auth:SIMPLE) from 127.0.0.1:45598 2024-12-10T14:56:23,944 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:56:28,843 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0004_000001 (auth:SIMPLE) from 127.0.0.1:44112 2024-12-10T14:56:29,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742058_1234 (size=350604) 2024-12-10T14:56:29,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742058_1234 (size=350604) 2024-12-10T14:56:29,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742058_1234 (size=350604) 2024-12-10T14:56:30,924 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:56:31,176 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0004_000001 (auth:SIMPLE) from 127.0.0.1:37250 2024-12-10T14:56:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742059_1235 (size=17447) 2024-12-10T14:56:35,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742059_1235 (size=17447) 2024-12-10T14:56:35,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742059_1235 (size=17447) 2024-12-10T14:56:35,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742060_1236 (size=462) 2024-12-10T14:56:35,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742060_1236 (size=462) 2024-12-10T14:56:35,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742060_1236 (size=462) 2024-12-10T14:56:35,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000002/launch_container.sh] 2024-12-10T14:56:35,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000002/container_tokens] 2024-12-10T14:56:35,823 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000002/sysfs] 2024-12-10T14:56:35,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742061_1237 (size=17447) 2024-12-10T14:56:35,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742061_1237 (size=17447) 2024-12-10T14:56:35,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742061_1237 (size=17447) 2024-12-10T14:56:35,887 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742062_1238 (size=350604) 2024-12-10T14:56:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742062_1238 (size=350604) 2024-12-10T14:56:35,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742062_1238 (size=350604) 2024-12-10T14:56:35,905 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0004_000001 (auth:SIMPLE) from 127.0.0.1:37254 2024-12-10T14:56:37,418 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:56:37,419 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-10T14:56:37,422 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-10T14:56:37,422 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:56:37,422 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:56:37,422 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-10T14:56:37,423 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-10T14:56:37,423 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-10T14:56:37,423 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@1a7f087e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-10T14:56:37,424 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-10T14:56:37,424 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-10T14:56:37,426 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, 
tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:37,473 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:37,473 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@1a7f087e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-10T14:56:37,475 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:56:37,484 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-10T14:56:37,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:37,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:37,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:37,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-7677219849405201149.jar 2024-12-10T14:56:38,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,594 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-13609038915136477055.jar 2024-12-10T14:56:38,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:56:38,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:56:38,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:56:38,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:56:38,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:56:38,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:56:38,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:56:38,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:56:38,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:56:38,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:56:38,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:56:38,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:56:38,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:56:38,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:38,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:38,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:38,670 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:38,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:56:38,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:38,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:56:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742063_1239 (size=127628) 2024-12-10T14:56:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742063_1239 (size=127628) 2024-12-10T14:56:38,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742063_1239 (size=127628) 2024-12-10T14:56:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742064_1240 (size=2172101) 2024-12-10T14:56:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742064_1240 (size=2172101) 2024-12-10T14:56:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742064_1240 (size=2172101) 2024-12-10T14:56:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742065_1241 (size=213228) 2024-12-10T14:56:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742065_1241 (size=213228) 2024-12-10T14:56:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742065_1241 (size=213228) 2024-12-10T14:56:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742066_1242 (size=1877034) 2024-12-10T14:56:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742066_1242 (size=1877034) 2024-12-10T14:56:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742066_1242 (size=1877034) 2024-12-10T14:56:38,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742067_1243 (size=533455) 
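[Editorial sketch] The mapreduce.TableMapReduceUtil(923) entries above ("For class X, using jar Y") are emitted while HBase resolves which jar supplies each class needed on the MapReduce classpath. For orientation only, this resolution is normally triggered by TableMapReduceUtil.addDependencyJars on the job being submitted; the job name below is a placeholder, not taken from this test.

    // Illustrative sketch only: the call that produces the "For class X, using jar Y"
    // log lines by wiring each dependency jar into the job's distributed cache.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch"); // hypothetical job name
        // Locates the jar shipping each required class (HBase, ZooKeeper, protobuf,
        // opentelemetry, ...) and adds it to the job classpath.
        TableMapReduceUtil.addDependencyJars(job);
        job.submit();
      }
    }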
2024-12-10T14:56:38,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742067_1243 (size=533455) 2024-12-10T14:56:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742067_1243 (size=533455) 2024-12-10T14:56:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742068_1244 (size=7280644) 2024-12-10T14:56:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742068_1244 (size=7280644) 2024-12-10T14:56:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742068_1244 (size=7280644) 2024-12-10T14:56:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742069_1245 (size=4188619) 2024-12-10T14:56:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742069_1245 (size=4188619) 2024-12-10T14:56:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742069_1245 (size=4188619) 2024-12-10T14:56:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742070_1246 (size=6350861) 2024-12-10T14:56:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742070_1246 (size=6350861) 2024-12-10T14:56:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742070_1246 (size=6350861) 2024-12-10T14:56:38,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742071_1247 (size=20406) 2024-12-10T14:56:38,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742071_1247 (size=20406) 2024-12-10T14:56:38,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742071_1247 (size=20406) 2024-12-10T14:56:38,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742072_1248 (size=75495) 2024-12-10T14:56:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742072_1248 (size=75495) 2024-12-10T14:56:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742072_1248 (size=75495) 2024-12-10T14:56:38,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742073_1249 (size=45609) 2024-12-10T14:56:38,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742073_1249 (size=45609) 2024-12-10T14:56:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742073_1249 
(size=45609) 2024-12-10T14:56:38,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742074_1250 (size=110084) 2024-12-10T14:56:38,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742074_1250 (size=110084) 2024-12-10T14:56:38,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742074_1250 (size=110084) 2024-12-10T14:56:38,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742075_1251 (size=1323991) 2024-12-10T14:56:38,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742075_1251 (size=1323991) 2024-12-10T14:56:38,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742075_1251 (size=1323991) 2024-12-10T14:56:38,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742076_1252 (size=23076) 2024-12-10T14:56:38,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742076_1252 (size=23076) 2024-12-10T14:56:38,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742076_1252 (size=23076) 2024-12-10T14:56:38,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742077_1253 (size=126803) 2024-12-10T14:56:38,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742077_1253 (size=126803) 2024-12-10T14:56:38,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742077_1253 (size=126803) 2024-12-10T14:56:38,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742078_1254 (size=322274) 2024-12-10T14:56:38,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742078_1254 (size=322274) 2024-12-10T14:56:38,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742078_1254 (size=322274) 2024-12-10T14:56:38,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742079_1255 (size=1832290) 2024-12-10T14:56:38,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742079_1255 (size=1832290) 2024-12-10T14:56:38,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742079_1255 (size=1832290) 2024-12-10T14:56:38,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742080_1256 (size=30081) 2024-12-10T14:56:38,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to 
blk_1073742080_1256 (size=30081) 2024-12-10T14:56:38,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742080_1256 (size=30081) 2024-12-10T14:56:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742081_1257 (size=53616) 2024-12-10T14:56:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742081_1257 (size=53616) 2024-12-10T14:56:38,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742081_1257 (size=53616) 2024-12-10T14:56:38,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742082_1258 (size=29229) 2024-12-10T14:56:38,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742082_1258 (size=29229) 2024-12-10T14:56:38,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742082_1258 (size=29229) 2024-12-10T14:56:38,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742083_1259 (size=169089) 2024-12-10T14:56:38,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742083_1259 (size=169089) 2024-12-10T14:56:38,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742083_1259 (size=169089) 2024-12-10T14:56:38,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742084_1260 (size=5175431) 2024-12-10T14:56:38,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742084_1260 (size=5175431) 2024-12-10T14:56:38,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742084_1260 (size=5175431) 2024-12-10T14:56:38,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742085_1261 (size=136454) 2024-12-10T14:56:38,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742085_1261 (size=136454) 2024-12-10T14:56:38,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742085_1261 (size=136454) 2024-12-10T14:56:38,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742086_1262 (size=907858) 2024-12-10T14:56:38,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742086_1262 (size=907858) 2024-12-10T14:56:38,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742086_1262 (size=907858) 2024-12-10T14:56:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to 
blk_1073742087_1263 (size=3317408) 2024-12-10T14:56:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742087_1263 (size=3317408) 2024-12-10T14:56:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742087_1263 (size=3317408) 2024-12-10T14:56:39,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742088_1264 (size=503880) 2024-12-10T14:56:39,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742088_1264 (size=503880) 2024-12-10T14:56:39,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742088_1264 (size=503880) 2024-12-10T14:56:39,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742089_1265 (size=451756) 2024-12-10T14:56:39,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742089_1265 (size=451756) 2024-12-10T14:56:39,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742089_1265 (size=451756) 2024-12-10T14:56:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742090_1266 (size=4695811) 2024-12-10T14:56:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742090_1266 (size=4695811) 2024-12-10T14:56:39,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742090_1266 (size=4695811) 2024-12-10T14:56:39,053 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
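[Editorial sketch] The JobResourceUploader(481) warning above ("No job jar file set. User classes may not be found.") is Hadoop's standard hint to point the submitted job at a jar, exactly as its message says via Job or Job#setJar(String). A minimal, hypothetical way to address it (not something this test does) is:

    // Sketch only: setting the job jar so JobResourceUploader can ship user classes.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "job-jar-sketch"); // hypothetical name
        // Either derive the jar from a class it contains, or point at it directly.
        job.setJarByClass(JobJarSketch.class);
        // job.setJar("/path/to/job.jar"); // alternative; path is a placeholder
      }
    }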
2024-12-10T14:56:39,056 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-10T14:56:39,058 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:56:39,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742091_1267 (size=338) 2024-12-10T14:56:39,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742091_1267 (size=338) 2024-12-10T14:56:39,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742091_1267 (size=338) 2024-12-10T14:56:39,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742092_1268 (size=15) 2024-12-10T14:56:39,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742092_1268 (size=15) 2024-12-10T14:56:39,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742092_1268 (size=15) 2024-12-10T14:56:39,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742093_1269 (size=304932) 2024-12-10T14:56:39,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742093_1269 (size=304932) 2024-12-10T14:56:39,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742093_1269 (size=304932) 2024-12-10T14:56:41,987 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:56:41,987 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:56:41,989 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0004_000001 (auth:SIMPLE) from 127.0.0.1:53434 2024-12-10T14:56:42,001 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000001/launch_container.sh] 2024-12-10T14:56:42,001 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000001/container_tokens] 2024-12-10T14:56:42,001 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0004/container_1733842510486_0004_01_000001/sysfs] 2024-12-10T14:56:42,805 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0005_000001 (auth:SIMPLE) from 127.0.0.1:45794 2024-12-10T14:56:48,591 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0005_000001 (auth:SIMPLE) from 127.0.0.1:47612 2024-12-10T14:56:48,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742094_1270 (size=350606) 2024-12-10T14:56:48,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742094_1270 (size=350606) 2024-12-10T14:56:48,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742094_1270 (size=350606) 2024-12-10T14:56:50,917 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0005_000001 (auth:SIMPLE) from 127.0.0.1:50516 2024-12-10T14:56:54,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742095_1271 (size=16924) 2024-12-10T14:56:54,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742095_1271 (size=16924) 2024-12-10T14:56:54,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742095_1271 (size=16924) 2024-12-10T14:56:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742096_1272 (size=462) 
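[Editorial sketch] The capacity.AbstractLeafQueue warnings above mean the scheduler's maximum-am-resource-percent is too small to admit even one ApplicationMaster, so enforcement is skipped. Under the assumption that one wanted to quiet this in a mini-cluster configuration (this test does not), the usual knob is the yarn.scheduler.capacity.maximum-am-resource-percent property; the 0.5 value below is an arbitrary example.

    // Sketch only: raising the AM resource share in the configuration handed to a
    // (mini) YARN cluster. The 0.5f value is an arbitrary example, not from this test.
    import org.apache.hadoop.conf.Configuration;

    public class AmResourcePercentSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
      }
    }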
2024-12-10T14:56:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742096_1272 (size=462) 2024-12-10T14:56:54,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742096_1272 (size=462) 2024-12-10T14:56:54,282 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000002/launch_container.sh] 2024-12-10T14:56:54,282 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000002/container_tokens] 2024-12-10T14:56:54,282 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000002/sysfs] 2024-12-10T14:56:54,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742097_1273 (size=16924) 2024-12-10T14:56:54,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742097_1273 (size=16924) 2024-12-10T14:56:54,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742097_1273 (size=16924) 2024-12-10T14:56:54,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742098_1274 (size=350606) 2024-12-10T14:56:54,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742098_1274 (size=350606) 2024-12-10T14:56:54,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742098_1274 (size=350606) 2024-12-10T14:56:54,321 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0005_000001 (auth:SIMPLE) from 127.0.0.1:50532 2024-12-10T14:56:56,280 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:56:56,280 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
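[Editor's note] The block-report and container entries above are the MapReduce copy phase of the snapshot export that the test drives, just before the "Finalize the Snapshot Export" step. As a point of reference only, a minimal sketch of launching the same tool directly is below; the snapshot name matches this log, while the destination URI and mapper count are illustrative assumptions, and the single-dash option names follow the tool's documented usage.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest plus the referenced HFiles to the target file system.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export",   // hypothetical destination
            "-mappers", "2"                           // hypothetical parallelism
        });
        System.exit(rc);
      }
    }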
2024-12-10T14:56:56,284 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-10T14:56:56,284 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:56:56,284 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:56:56,285 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-10T14:56:56,286 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-10T14:56:56,287 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-10T14:56:56,287 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@1a7f087e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-10T14:56:56,287 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-10T14:56:56,287 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842580085/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-10T14:56:56,310 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-10T14:56:56,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-10T14:56:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,315 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842616315"}]},"ts":"1733842616315"} 2024-12-10T14:56:56,317 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-10T14:56:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-10T14:56:56,319 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-10T14:56:56,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-10T14:56:56,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, UNASSIGN}] 2024-12-10T14:56:56,324 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, UNASSIGN 2024-12-10T14:56:56,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, UNASSIGN 2024-12-10T14:56:56,326 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=3544e35340615a556e248ecbe6a8f3f1, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:56,326 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=9c689fc20e3215e22342ca57113b11b8, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:56,329 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=db1d50717577,37707,1733842502956, table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-10T14:56:56,329 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:56,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 3544e35340615a556e248ecbe6a8f3f1, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:56:56,338 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:56,339 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure 9c689fc20e3215e22342ca57113b11b8, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-10T14:56:56,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:56,489 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:56,489 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:56:56,489 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 3544e35340615a556e248ecbe6a8f3f1, disabling compactions & flushes 2024-12-10T14:56:56,489 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:56,489 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:56,489 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. after waiting 0 ms 2024-12-10T14:56:56,490 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 
2024-12-10T14:56:56,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:56,493 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:56,493 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:56:56,493 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 9c689fc20e3215e22342ca57113b11b8, disabling compactions & flushes 2024-12-10T14:56:56,493 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:56,493 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:56,493 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. after waiting 0 ms 2024-12-10T14:56:56,493 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 2024-12-10T14:56:56,513 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:56:56,513 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:56:56,514 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:56,514 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8. 
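[Editor's note] The close/unassign activity above is the server side of a single client call. A minimal client-side sketch (connection setup is assumed boilerplate; the table name is taken from this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the DisableTableProcedure (pid=92 in this log) reaches SUCCESS.
          admin.disableTable(TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }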
2024-12-10T14:56:56,514 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:56,514 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 9c689fc20e3215e22342ca57113b11b8: 2024-12-10T14:56:56,514 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1. 2024-12-10T14:56:56,514 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 3544e35340615a556e248ecbe6a8f3f1: 2024-12-10T14:56:56,521 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:56,522 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=3544e35340615a556e248ecbe6a8f3f1, regionState=CLOSED 2024-12-10T14:56:56,523 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:56,524 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=9c689fc20e3215e22342ca57113b11b8, regionState=CLOSED 2024-12-10T14:56:56,527 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-10T14:56:56,527 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 3544e35340615a556e248ecbe6a8f3f1, server=db1d50717577,37707,1733842502956 in 195 msec 2024-12-10T14:56:56,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94 2024-12-10T14:56:56,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3544e35340615a556e248ecbe6a8f3f1, UNASSIGN in 204 msec 2024-12-10T14:56:56,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure 9c689fc20e3215e22342ca57113b11b8, server=db1d50717577,41775,1733842502887 in 187 msec 2024-12-10T14:56:56,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-10T14:56:56,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9c689fc20e3215e22342ca57113b11b8, UNASSIGN in 205 msec 2024-12-10T14:56:56,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-10T14:56:56,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 212 msec 2024-12-10T14:56:56,536 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842616535"}]},"ts":"1733842616535"} 2024-12-10T14:56:56,537 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-10T14:56:56,539 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-10T14:56:56,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 229 msec 2024-12-10T14:56:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-10T14:56:56,626 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-10T14:56:56,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-10T14:56:56,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,629 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-10T14:56:56,629 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,631 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-10T14:56:56,634 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:56,634 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:56,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, 
quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,639 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-10T14:56:56,639 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-10T14:56:56,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-10T14:56:56,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-10T14:56:56,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:56,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:56,642 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/recovered.edits] 2024-12-10T14:56:56,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:56,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-10T14:56:56,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:56,645 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving 
[FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/recovered.edits] 2024-12-10T14:56:56,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T14:56:56,658 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/cf/595416239e6a4a4c9e1b3abe89681e60 2024-12-10T14:56:56,659 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/cf/5924360b3a1c49879578e35862929e4f 2024-12-10T14:56:56,665 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1/recovered.edits/9.seqid 2024-12-10T14:56:56,665 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8/recovered.edits/9.seqid 2024-12-10T14:56:56,666 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/3544e35340615a556e248ecbe6a8f3f1 2024-12-10T14:56:56,666 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testConsecutiveExports/9c689fc20e3215e22342ca57113b11b8 2024-12-10T14:56:56,666 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-10T14:56:56,669 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,674 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 
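[Editor's note] The HFileArchiver entries show each region directory being moved under the archive root rather than deleted outright. Purely as an illustration (none of this code is part of the test), the archived layout could be inspected as below; the archive path is the one printed above, everything else is hypothetical.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedRegions {
      public static void main(String[] args) throws Exception {
        // Archive root taken from the HFileArchiver lines above.
        Path archive = new Path("hdfs://localhost:36449/user/jenkins/test-data/"
            + "391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testConsecutiveExports");
        FileSystem fs = FileSystem.get(URI.create(archive.toString()), new Configuration());
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(archive, true);
        while (files.hasNext()) {
          // Prints the relocated HFiles (cf/...) and recovered.edits/9.seqid entries.
          System.out.println(files.next().getPath());
        }
      }
    }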
2024-12-10T14:56:56,684 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-10T14:56:56,685 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,685 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-10T14:56:56,686 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842616685"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:56,686 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842616685"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:56,688 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:56:56,689 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9c689fc20e3215e22342ca57113b11b8, NAME => 'testtb-testConsecutiveExports,,1733842578740.9c689fc20e3215e22342ca57113b11b8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3544e35340615a556e248ecbe6a8f3f1, NAME => 'testtb-testConsecutiveExports,1,1733842578740.3544e35340615a556e248ecbe6a8f3f1.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:56:56,689 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
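[Editor's note] The DeleteTableProcedure above, together with the two snapshot deletions logged shortly after, corresponds to a short Admin teardown sequence on the client. A sketch, assuming the same connection setup as the earlier disable example:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The table must already be disabled (pid=92 above) before it can be deleted.
          admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));
          // Snapshot cleanup, mirroring the two delete requests logged below.
          admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
          admin.deleteSnapshot("snaptb0-testConsecutiveExports");
        }
      }
    }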
2024-12-10T14:56:56,689 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842616689"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:56,691 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-10T14:56:56,694 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-10T14:56:56,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 67 msec 2024-12-10T14:56:56,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T14:56:56,748 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-10T14:56:56,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-10T14:56:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-10T14:56:56,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-10T14:56:56,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-10T14:56:56,800 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=797 (was 798), OpenFileDescriptor=807 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=551 (was 527) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=1590 (was 2074) 2024-12-10T14:56:56,800 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-10T14:56:56,826 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=797, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=551, ProcessCount=20, AvailableMemoryMB=1584 2024-12-10T14:56:56,826 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-10T14:56:56,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:56:56,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:56,832 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:56:56,832 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:56,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-10T14:56:56,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-10T14:56:56,834 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:56:56,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742099_1275 (size=422) 2024-12-10T14:56:56,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742099_1275 (size=422) 2024-12-10T14:56:56,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742099_1275 (size=422) 2024-12-10T14:56:56,850 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 54dee9bbb9571fe8ba05781cf679cda0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:56,850 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => ad6ff8dc223b4960571243a5d1c11885, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742100_1276 (size=83) 2024-12-10T14:56:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742100_1276 (size=83) 2024-12-10T14:56:56,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742100_1276 (size=83) 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing ad6ff8dc223b4960571243a5d1c11885, disabling compactions & flushes 2024-12-10T14:56:56,874 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. after waiting 0 ms 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 
2024-12-10T14:56:56,874 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:56,874 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for ad6ff8dc223b4960571243a5d1c11885: 2024-12-10T14:56:56,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742101_1277 (size=83) 2024-12-10T14:56:56,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742101_1277 (size=83) 2024-12-10T14:56:56,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742101_1277 (size=83) 2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 54dee9bbb9571fe8ba05781cf679cda0, disabling compactions & flushes 2024-12-10T14:56:56,886 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. after waiting 0 ms 2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:56,886 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 
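[Editor's note] The descriptor spelled out above (a single 'cf' family with VERSIONS => '1' and REGION_REPLICATION => '1') and the two region boundaries ('' to '1' and '1' to '') correspond to a create call with one split key. A sketch of the equivalent client call; the values are read off the log, the surrounding boilerplate is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMergeRegionTable {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // One split key "1" yields the two regions seen above: ('', '1') and ('1', '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }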
2024-12-10T14:56:56,886 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 54dee9bbb9571fe8ba05781cf679cda0: 2024-12-10T14:56:56,887 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:56:56,887 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733842616887"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842616887"}]},"ts":"1733842616887"} 2024-12-10T14:56:56,887 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733842616887"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842616887"}]},"ts":"1733842616887"} 2024-12-10T14:56:56,890 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:56:56,891 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:56:56,892 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842616891"}]},"ts":"1733842616891"} 2024-12-10T14:56:56,893 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-10T14:56:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-10T14:56:56,961 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:56:56,962 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:56:56,962 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:56:56,963 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:56:56,963 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:56:56,963 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:56:56,963 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:56:56,963 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:56:56,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, ASSIGN}] 2024-12-10T14:56:56,965 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, ASSIGN 2024-12-10T14:56:56,965 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, ASSIGN 2024-12-10T14:56:56,966 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:56:56,966 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:56:57,117 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:56:57,117 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=54dee9bbb9571fe8ba05781cf679cda0, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:57,117 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ad6ff8dc223b4960571243a5d1c11885, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:57,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=100, state=RUNNABLE; OpenRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:56:57,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure ad6ff8dc223b4960571243a5d1c11885, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:57,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-10T14:56:57,271 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:57,273 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:57,275 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 
2024-12-10T14:56:57,275 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 54dee9bbb9571fe8ba05781cf679cda0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:56:57,276 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. service=AccessControlService 2024-12-10T14:56:57,276 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:56:57,276 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,276 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:57,276 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,276 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,279 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:57,280 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => ad6ff8dc223b4960571243a5d1c11885, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:56:57,280 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. service=AccessControlService 2024-12-10T14:56:57,280 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
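[Editor's note] The coprocessor lines show AccessController being attached to each opening region, which is what drives the PermissionStorage and ZKPermissionWatcher traffic seen earlier. A hedged sketch of the configuration that typically enables it, assuming the secure mini-cluster sets the equivalent properties in its hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureClusterConf {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        // Load the coprocessor on the master, every region, and every region server.
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }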
2024-12-10T14:56:57,280 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,281 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:57,281 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,281 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,285 INFO [StoreOpener-54dee9bbb9571fe8ba05781cf679cda0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,285 INFO [StoreOpener-ad6ff8dc223b4960571243a5d1c11885-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,287 INFO [StoreOpener-54dee9bbb9571fe8ba05781cf679cda0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54dee9bbb9571fe8ba05781cf679cda0 columnFamilyName cf 2024-12-10T14:56:57,287 DEBUG [StoreOpener-54dee9bbb9571fe8ba05781cf679cda0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:57,287 INFO [StoreOpener-54dee9bbb9571fe8ba05781cf679cda0-1 {}] regionserver.HStore(327): Store=54dee9bbb9571fe8ba05781cf679cda0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:57,289 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,289 DEBUG 
[RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,292 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,295 INFO [StoreOpener-ad6ff8dc223b4960571243a5d1c11885-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad6ff8dc223b4960571243a5d1c11885 columnFamilyName cf 2024-12-10T14:56:57,295 DEBUG [StoreOpener-ad6ff8dc223b4960571243a5d1c11885-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:57,295 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:57,295 INFO [StoreOpener-ad6ff8dc223b4960571243a5d1c11885-1 {}] regionserver.HStore(327): Store=ad6ff8dc223b4960571243a5d1c11885/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:57,296 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,297 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 54dee9bbb9571fe8ba05781cf679cda0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71212425, jitterRate=0.06114782392978668}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:57,298 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 54dee9bbb9571fe8ba05781cf679cda0: 2024-12-10T14:56:57,298 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,299 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0., pid=102, masterSystemTime=1733842617271 2024-12-10T14:56:57,301 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,301 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:57,301 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:57,301 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=54dee9bbb9571fe8ba05781cf679cda0, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:56:57,303 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:57,303 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened ad6ff8dc223b4960571243a5d1c11885; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62434098, jitterRate=-0.06965944170951843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:57,304 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for ad6ff8dc223b4960571243a5d1c11885: 2024-12-10T14:56:57,305 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885., pid=103, masterSystemTime=1733842617273 2024-12-10T14:56:57,307 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:57,307 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 
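The entries above show both new regions being opened and their assignments written to hbase:meta (regionState=OPEN, openSeqNum=2, regionLocation=host,port,startcode). A minimal client-side sketch of reading those assignments back, assuming only a reachable cluster whose hbase-site.xml is on the classpath; the class name and printed format are illustrative, not part of the test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocations {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location mirrors the regionState/regionLocation fields recorded in hbase:meta above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}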
2024-12-10T14:56:57,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=100 2024-12-10T14:56:57,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=100, state=SUCCESS; OpenRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0, server=db1d50717577,37707,1733842502956 in 185 msec 2024-12-10T14:56:57,308 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=ad6ff8dc223b4960571243a5d1c11885, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:57,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, ASSIGN in 344 msec 2024-12-10T14:56:57,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-10T14:56:57,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure ad6ff8dc223b4960571243a5d1c11885, server=db1d50717577,41775,1733842502887 in 189 msec 2024-12-10T14:56:57,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-10T14:56:57,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, ASSIGN in 350 msec 2024-12-10T14:56:57,315 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:56:57,316 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842617315"}]},"ts":"1733842617315"} 2024-12-10T14:56:57,320 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-10T14:56:57,324 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:56:57,325 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-10T14:56:57,327 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-10T14:56:57,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:57,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:57,329 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:57,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:57,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:57,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:57,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:57,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:57,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 504 msec 2024-12-10T14:56:57,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-10T14:56:57,438 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-10T14:56:57,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-10T14:56:57,438 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:57,444 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-10T14:56:57,444 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:57,444 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
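The acl row written by PermissionStorage and the ZooKeeper NodeChildrenChanged events above propagate the table owner's permissions (jenkins: RWXCA) to every region server's permission cache. A hedged sketch of issuing an equivalent grant through the public client API, assuming the AccessController coprocessor is enabled as it evidently is in this test cluster; the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAcl {
  public static void main(String[] args) throws Throwable { // AccessControlClient.grant declares Throwable
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // null family/qualifier grants at table scope; the five actions spell out the RWXCA entry above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}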
2024-12-10T14:56:57,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-10T14:56:57,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842617451 (current time:1733842617451). 2024-12-10T14:56:57,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:57,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-10T14:56:57,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:57,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c580b05 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9f1ada 2024-12-10T14:56:57,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67e8e257, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,470 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c580b05 to 127.0.0.1:57176 2024-12-10T14:56:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:57,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75ae0c4b to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51c381ee 2024-12-10T14:56:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71192bdb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:57,489 DEBUG [hconnection-0x59912509-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,490 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-10T14:56:57,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,492 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:57,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75ae0c4b to 127.0.0.1:57176 2024-12-10T14:56:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-10T14:56:57,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:56:57,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-10T14:56:57,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-10T14:56:57,498 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:57,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:56:57,499 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:57,502 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742102_1278 (size=215) 2024-12-10T14:56:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742102_1278 (size=215) 2024-12-10T14:56:57,517 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742102_1278 (size=215) 2024-12-10T14:56:57,519 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:57,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885}] 2024-12-10T14:56:57,520 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,521 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:56:57,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:57,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-10T14:56:57,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:57,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:57,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 54dee9bbb9571fe8ba05781cf679cda0: 2024-12-10T14:56:57,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-10T14:56:57,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:57,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for ad6ff8dc223b4960571243a5d1c11885: 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:57,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:56:57,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742103_1279 (size=86) 2024-12-10T14:56:57,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742103_1279 (size=86) 2024-12-10T14:56:57,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742104_1280 (size=86) 2024-12-10T14:56:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 
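The { ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } request above is driven by the master-side SnapshotProcedure; because the table holds no data yet, each SnapshotRegionCallable stores only region-info and an empty hfile reference list. A minimal sketch of requesting the same FLUSH-type snapshot from a client, under the same cluster assumptions; the class name is illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Equivalent to the { ss=... table=... type=FLUSH ttl=0 } request logged by MasterRpcServices.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}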
2024-12-10T14:56:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-10T14:56:57,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-10T14:56:57,682 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,682 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742103_1279 (size=86) 2024-12-10T14:56:57,685 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 in 164 msec 2024-12-10T14:56:57,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:56:57,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-10T14:56:57,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-10T14:56:57,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,686 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742104_1280 (size=86) 2024-12-10T14:56:57,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742104_1280 (size=86) 2024-12-10T14:56:57,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=104 2024-12-10T14:56:57,689 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:57,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 in 168 msec 2024-12-10T14:56:57,690 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:57,691 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:57,691 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,692 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742105_1281 (size=597) 2024-12-10T14:56:57,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742105_1281 (size=597) 2024-12-10T14:56:57,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742105_1281 (size=597) 2024-12-10T14:56:57,703 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:57,708 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:57,708 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,710 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:57,710 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-10T14:56:57,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 214 msec 2024-12-10T14:56:57,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:56:57,804 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-10T14:56:57,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:57,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:56:57,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:57,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:57,815 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:56:57,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-10T14:56:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842617826 (current time:1733842617826). 
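The two "writing data to region ... with WAL disabled" warnings above correspond to client writes whose durability skips the write-ahead log. A hedged sketch of such a write, under the same cluster assumptions; the row key and value are placeholders, while the family cf and qualifier q match the cells that show up later in the flushed hfiles:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWithoutWal {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));                 // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")); // placeholder value
      put.setDurability(Durability.SKIP_WAL);                       // triggers the "WAL disabled" warning above
      table.put(put);
    }
  }
}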
2024-12-10T14:56:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-10T14:56:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x292636a3 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@794ff7e3 2024-12-10T14:56:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@603f8778, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:57,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,832 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:57,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x292636a3 to 127.0.0.1:57176 2024-12-10T14:56:57,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:57,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2535fde5 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23c3f64f 2024-12-10T14:56:57,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fdba108, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:57,838 DEBUG [hconnection-0x70148903-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,839 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:57,842 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x2535fde5 to 127.0.0.1:57176 2024-12-10T14:56:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-10T14:56:57,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:56:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-10T14:56:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-10T14:56:57,846 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-10T14:56:57,847 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:57,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742106_1282 (size=210) 2024-12-10T14:56:57,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742106_1282 (size=210) 2024-12-10T14:56:57,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742106_1282 (size=210) 2024-12-10T14:56:57,856 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:57,856 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885}] 2024-12-10T14:56:57,857 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:57,857 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:57,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-10T14:56:58,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:58,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:56:58,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-10T14:56:58,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-10T14:56:58,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:56:58,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 
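Because snaptb0-testExportFileSystemStateWithMergeRegion is also a FLUSH-type snapshot, the SnapshotRegionCallables dispatched above flush each region's memstore before referencing its store files, as the entries that follow show. The same memstore flush can be requested directly through the Admin API; a sketch under the same cluster assumptions, with an illustrative class name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Writes each region's memstore out to an hfile, like the per-region flushes logged below.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}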
2024-12-10T14:56:58,009 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 54dee9bbb9571fe8ba05781cf679cda0 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-10T14:56:58,009 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing ad6ff8dc223b4960571243a5d1c11885 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-10T14:56:58,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/.tmp/cf/7950909cecce407680c9226c2913b6e5 is 71, key is 007eac3f508658f52c2f7c0e822ff1e9/cf:q/1733842617810/Put/seqid=0 2024-12-10T14:56:58,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742107_1283 (size=5354) 2024-12-10T14:56:58,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742107_1283 (size=5354) 2024-12-10T14:56:58,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742107_1283 (size=5354) 2024-12-10T14:56:58,032 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/.tmp/cf/7950909cecce407680c9226c2913b6e5 2024-12-10T14:56:58,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/.tmp/cf/6208e827017f45509e36239a27948ef3 is 71, key is 179c847175166f45848a68da39f233ff/cf:q/1733842617812/Put/seqid=0 2024-12-10T14:56:58,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/.tmp/cf/7950909cecce407680c9226c2913b6e5 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5 2024-12-10T14:56:58,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742108_1284 (size=8258) 2024-12-10T14:56:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742108_1284 (size=8258) 2024-12-10T14:56:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to 
blk_1073742108_1284 (size=8258) 2024-12-10T14:56:58,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/.tmp/cf/6208e827017f45509e36239a27948ef3 2024-12-10T14:56:58,043 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5, entries=4, sequenceid=6, filesize=5.2 K 2024-12-10T14:56:58,044 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 54dee9bbb9571fe8ba05781cf679cda0 in 35ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:58,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/.tmp/cf/6208e827017f45509e36239a27948ef3 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 54dee9bbb9571fe8ba05781cf679cda0: 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5] hfiles 2024-12-10T14:56:58,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3, entries=46, sequenceid=6, filesize=8.1 K 2024-12-10T14:56:58,051 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ad6ff8dc223b4960571243a5d1c11885 in 42ms, sequenceid=6, compaction requested=false 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for ad6ff8dc223b4960571243a5d1c11885: 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3] hfiles 2024-12-10T14:56:58,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742109_1285 (size=125) 2024-12-10T14:56:58,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742109_1285 (size=125) 2024-12-10T14:56:58,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742109_1285 (size=125) 2024-12-10T14:56:58,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 
2024-12-10T14:56:58,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-10T14:56:58,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-10T14:56:58,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:58,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:56:58,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0 in 204 msec 2024-12-10T14:56:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742110_1286 (size=125) 2024-12-10T14:56:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742110_1286 (size=125) 2024-12-10T14:56:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742110_1286 (size=125) 2024-12-10T14:56:58,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 
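Once the procedure above reaches SNAPSHOT_COMPLETE_SNAPSHOT a few entries further on, both snapshots have been moved out of .hbase-snapshot/.tmp and become visible to clients. A sketch of listing them, under the same cluster assumptions and with an illustrative class name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListCompletedSnapshots {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (SnapshotDescription sd : admin.listSnapshots()) {
        // Expected to include emptySnaptb0-... and, once pid=107 completes, snaptb0-...
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}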
2024-12-10T14:56:58,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-10T14:56:58,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-10T14:56:58,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:58,069 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:56:58,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-10T14:56:58,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure ad6ff8dc223b4960571243a5d1c11885 in 214 msec 2024-12-10T14:56:58,071 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:56:58,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:56:58,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:56:58,072 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,073 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742111_1287 (size=675) 2024-12-10T14:56:58,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742111_1287 (size=675) 2024-12-10T14:56:58,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742111_1287 (size=675) 2024-12-10T14:56:58,087 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:56:58,092 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:56:58,092 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:56:58,093 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:56:58,094 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-10T14:56:58,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 250 msec 2024-12-10T14:56:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-10T14:56:58,148 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-10T14:56:58,168 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:56:58,169 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:56:58,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-10T14:56:58,171 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:56:58,173 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60514, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:56:58,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-10T14:56:58,173 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:56:58,174 INFO [RS-EventLoopGroup-5-2 
{}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:56:58,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-10T14:56:58,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:56:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:56:58,178 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:56:58,178 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:58,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-10T14:56:58,179 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:56:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:56:58,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742112_1288 (size=399) 2024-12-10T14:56:58,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742112_1288 (size=399) 2024-12-10T14:56:58,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742112_1288 (size=399) 2024-12-10T14:56:58,187 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 36745f7a1ea9bfbd1ff9249afef0a057, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:58,188 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 91bd679bdab76a50ec1680281808a38a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:56:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742114_1290 (size=85) 2024-12-10T14:56:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742113_1289 (size=85) 2024-12-10T14:56:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742113_1289 (size=85) 2024-12-10T14:56:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742114_1290 (size=85) 2024-12-10T14:56:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742113_1289 (size=85) 2024-12-10T14:56:58,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742114_1290 (size=85) 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 91bd679bdab76a50ec1680281808a38a, disabling compactions & flushes 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing 36745f7a1ea9bfbd1ff9249afef0a057, disabling compactions & flushes 2024-12-10T14:56:58,199 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 
2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,199 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. after waiting 0 ms 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,199 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. after waiting 0 ms 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 91bd679bdab76a50ec1680281808a38a: 2024-12-10T14:56:58,199 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,200 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 
2024-12-10T14:56:58,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for 36745f7a1ea9bfbd1ff9249afef0a057: 2024-12-10T14:56:58,201 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:56:58,201 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733842618201"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842618201"}]},"ts":"1733842618201"} 2024-12-10T14:56:58,201 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733842618201"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842618201"}]},"ts":"1733842618201"} 2024-12-10T14:56:58,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:56:58,203 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:56:58,204 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842618203"}]},"ts":"1733842618203"} 2024-12-10T14:56:58,205 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-10T14:56:58,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:56:58,210 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:56:58,210 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:56:58,210 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:56:58,210 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:56:58,210 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:56:58,210 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:56:58,210 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:56:58,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, ASSIGN}] 2024-12-10T14:56:58,211 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, ASSIGN 2024-12-10T14:56:58,211 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, ASSIGN 2024-12-10T14:56:58,212 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:56:58,212 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:56:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:56:58,363 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:56:58,363 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=91bd679bdab76a50ec1680281808a38a, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:56:58,363 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=36745f7a1ea9bfbd1ff9249afef0a057, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:58,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 91bd679bdab76a50ec1680281808a38a, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:56:58,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure 36745f7a1ea9bfbd1ff9249afef0a057, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:58,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:56:58,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:56:58,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:58,520 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 
2024-12-10T14:56:58,520 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,520 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 91bd679bdab76a50ec1680281808a38a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.', STARTKEY => '2', ENDKEY => ''} 2024-12-10T14:56:58,520 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => 36745f7a1ea9bfbd1ff9249afef0a057, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.', STARTKEY => '', ENDKEY => '2'} 2024-12-10T14:56:58,520 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. service=AccessControlService 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. service=AccessControlService 2024-12-10T14:56:58,521 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:56:58,521 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,521 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,522 INFO [StoreOpener-91bd679bdab76a50ec1680281808a38a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,522 INFO [StoreOpener-36745f7a1ea9bfbd1ff9249afef0a057-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,524 INFO [StoreOpener-36745f7a1ea9bfbd1ff9249afef0a057-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36745f7a1ea9bfbd1ff9249afef0a057 columnFamilyName cf 2024-12-10T14:56:58,524 INFO [StoreOpener-91bd679bdab76a50ec1680281808a38a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91bd679bdab76a50ec1680281808a38a columnFamilyName cf 2024-12-10T14:56:58,524 DEBUG [StoreOpener-36745f7a1ea9bfbd1ff9249afef0a057-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:58,524 DEBUG [StoreOpener-91bd679bdab76a50ec1680281808a38a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:58,524 INFO [StoreOpener-36745f7a1ea9bfbd1ff9249afef0a057-1 {}] regionserver.HStore(327): Store=36745f7a1ea9bfbd1ff9249afef0a057/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:58,524 INFO [StoreOpener-91bd679bdab76a50ec1680281808a38a-1 {}] regionserver.HStore(327): Store=91bd679bdab76a50ec1680281808a38a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:58,525 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,525 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,525 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,526 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,527 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, 
pid=114}] regionserver.HRegion(1085): writing seq id for 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,528 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,529 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:58,530 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened 36745f7a1ea9bfbd1ff9249afef0a057; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68690973, jitterRate=0.02357526123523712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:58,530 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:56:58,530 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for 36745f7a1ea9bfbd1ff9249afef0a057: 2024-12-10T14:56:58,530 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 91bd679bdab76a50ec1680281808a38a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66387121, jitterRate=-0.010754808783531189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:58,531 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 91bd679bdab76a50ec1680281808a38a: 2024-12-10T14:56:58,531 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057., pid=114, masterSystemTime=1733842618517 2024-12-10T14:56:58,531 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a., pid=113, masterSystemTime=1733842618516 2024-12-10T14:56:58,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,533 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 
2024-12-10T14:56:58,533 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=36745f7a1ea9bfbd1ff9249afef0a057, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:58,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,534 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,534 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=91bd679bdab76a50ec1680281808a38a, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:56:58,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-10T14:56:58,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure 36745f7a1ea9bfbd1ff9249afef0a057, server=db1d50717577,41775,1733842502887 in 170 msec 2024-12-10T14:56:58,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-10T14:56:58,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 91bd679bdab76a50ec1680281808a38a, server=db1d50717577,45255,1733842503015 in 171 msec 2024-12-10T14:56:58,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, ASSIGN in 326 msec 2024-12-10T14:56:58,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-10T14:56:58,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, ASSIGN in 328 msec 2024-12-10T14:56:58,540 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:56:58,540 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842618540"}]},"ts":"1733842618540"} 2024-12-10T14:56:58,542 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-10T14:56:58,544 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:56:58,544 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: 
RWXCA 2024-12-10T14:56:58,546 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-10T14:56:58,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:58,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:58,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:58,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 
2024-12-10T14:56:58,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-10T14:56:58,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 374 msec 2024-12-10T14:56:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:56:58,782 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-10T14:56:58,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a] 2024-12-10T14:56:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a], force=true 2024-12-10T14:56:58,807 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a], force=true 2024-12-10T14:56:58,807 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a], force=true 2024-12-10T14:56:58,807 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a], force=true 2024-12-10T14:56:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T14:56:58,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, UNASSIGN}] 2024-12-10T14:56:58,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, UNASSIGN 2024-12-10T14:56:58,825 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for 
pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, UNASSIGN 2024-12-10T14:56:58,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=36745f7a1ea9bfbd1ff9249afef0a057, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:58,826 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=91bd679bdab76a50ec1680281808a38a, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:56:58,827 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:58,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE; CloseRegionProcedure 91bd679bdab76a50ec1680281808a38a, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:56:58,828 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-10T14:56:58,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=116, state=RUNNABLE; CloseRegionProcedure 36745f7a1ea9bfbd1ff9249afef0a057, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:58,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T14:56:58,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:56:58,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing 91bd679bdab76a50ec1680281808a38a, disabling compactions & flushes 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 
2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 36745f7a1ea9bfbd1ff9249afef0a057, disabling compactions & flushes 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. after waiting 0 ms 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing 91bd679bdab76a50ec1680281808a38a 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. after waiting 0 ms 2024-12-10T14:56:58,980 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 
2024-12-10T14:56:58,980 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 36745f7a1ea9bfbd1ff9249afef0a057 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-10T14:56:59,001 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/.tmp/cf/e3bb42a5b11343a98728dced90d827dd is 28, key is 1/cf:/1733842618785/Put/seqid=0 2024-12-10T14:56:59,006 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/.tmp/cf/eb80b8683dcc4438979a5d4ea75629e9 is 28, key is 2/cf:/1733842618788/Put/seqid=0 2024-12-10T14:56:59,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742115_1291 (size=4945) 2024-12-10T14:56:59,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742115_1291 (size=4945) 2024-12-10T14:56:59,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742115_1291 (size=4945) 2024-12-10T14:56:59,018 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/.tmp/cf/e3bb42a5b11343a98728dced90d827dd 2024-12-10T14:56:59,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742116_1292 (size=4945) 2024-12-10T14:56:59,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742116_1292 (size=4945) 2024-12-10T14:56:59,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742116_1292 (size=4945) 2024-12-10T14:56:59,022 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/.tmp/cf/eb80b8683dcc4438979a5d4ea75629e9 2024-12-10T14:56:59,025 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/.tmp/cf/e3bb42a5b11343a98728dced90d827dd as 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd 2024-12-10T14:56:59,027 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/.tmp/cf/eb80b8683dcc4438979a5d4ea75629e9 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9 2024-12-10T14:56:59,031 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd, entries=1, sequenceid=5, filesize=4.8 K 2024-12-10T14:56:59,032 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 36745f7a1ea9bfbd1ff9249afef0a057 in 52ms, sequenceid=5, compaction requested=false 2024-12-10T14:56:59,032 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-10T14:56:59,033 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9, entries=1, sequenceid=5, filesize=4.8 K 2024-12-10T14:56:59,033 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 91bd679bdab76a50ec1680281808a38a in 53ms, sequenceid=5, compaction requested=false 2024-12-10T14:56:59,037 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:56:59,038 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:59,038 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:56:59,038 
INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057. 2024-12-10T14:56:59,038 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 36745f7a1ea9bfbd1ff9249afef0a057: 2024-12-10T14:56:59,038 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:56:59,038 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a. 2024-12-10T14:56:59,038 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for 91bd679bdab76a50ec1680281808a38a: 2024-12-10T14:56:59,039 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:56:59,040 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=36745f7a1ea9bfbd1ff9249afef0a057, regionState=CLOSED 2024-12-10T14:56:59,040 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed 91bd679bdab76a50ec1680281808a38a 2024-12-10T14:56:59,041 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=91bd679bdab76a50ec1680281808a38a, regionState=CLOSED 2024-12-10T14:56:59,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=116 2024-12-10T14:56:59,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=116, state=SUCCESS; CloseRegionProcedure 36745f7a1ea9bfbd1ff9249afef0a057, server=db1d50717577,41775,1733842502887 in 213 msec 2024-12-10T14:56:59,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-10T14:56:59,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; CloseRegionProcedure 91bd679bdab76a50ec1680281808a38a, server=db1d50717577,45255,1733842503015 in 215 msec 2024-12-10T14:56:59,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=36745f7a1ea9bfbd1ff9249afef0a057, UNASSIGN in 219 msec 2024-12-10T14:56:59,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=115 2024-12-10T14:56:59,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=91bd679bdab76a50ec1680281808a38a, UNASSIGN in 220 msec 2024-12-10T14:56:59,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742117_1293 (size=84) 2024-12-10T14:56:59,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34057 is added to blk_1073742117_1293 (size=84) 2024-12-10T14:56:59,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742117_1293 (size=84) 2024-12-10T14:56:59,061 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:59,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742118_1294 (size=20) 2024-12-10T14:56:59,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742118_1294 (size=20) 2024-12-10T14:56:59,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742118_1294 (size=20) 2024-12-10T14:56:59,073 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742119_1295 (size=21) 2024-12-10T14:56:59,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742119_1295 (size=21) 2024-12-10T14:56:59,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742119_1295 (size=21) 2024-12-10T14:56:59,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742120_1296 (size=84) 2024-12-10T14:56:59,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742120_1296 (size=84) 2024-12-10T14:56:59,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742120_1296 (size=84) 2024-12-10T14:56:59,087 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:59,097 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-10T14:56:59,099 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618176.36745f7a1ea9bfbd1ff9249afef0a057.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:59,099 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733842618176.91bd679bdab76a50ec1680281808a38a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:59,099 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-10T14:56:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T14:56:59,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, ASSIGN}] 2024-12-10T14:56:59,131 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, ASSIGN 2024-12-10T14:56:59,131 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, ASSIGN; state=MERGED, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:56:59,282 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T14:56:59,282 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=7989d98b9d2c3897b7c4cfb24cffcca9, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:59,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:56:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T14:56:59,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:56:59,438 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:56:59,439 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 7989d98b9d2c3897b7c4cfb24cffcca9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:56:59,439 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 
service=AccessControlService 2024-12-10T14:56:59,439 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:56:59,439 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,439 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:56:59,440 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,440 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,441 INFO [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,442 INFO [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7989d98b9d2c3897b7c4cfb24cffcca9 columnFamilyName cf 2024-12-10T14:56:59,442 DEBUG [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:56:59,473 DEBUG [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/e3bb42a5b11343a98728dced90d827dd.36745f7a1ea9bfbd1ff9249afef0a057->hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd-top 2024-12-10T14:56:59,480 DEBUG [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/eb80b8683dcc4438979a5d4ea75629e9.91bd679bdab76a50ec1680281808a38a->hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9-top 2024-12-10T14:56:59,481 INFO [StoreOpener-7989d98b9d2c3897b7c4cfb24cffcca9-1 {}] regionserver.HStore(327): Store=7989d98b9d2c3897b7c4cfb24cffcca9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:56:59,482 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,483 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,485 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:56:59,486 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 7989d98b9d2c3897b7c4cfb24cffcca9; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60010040, jitterRate=-0.1057807207107544}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:56:59,486 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 7989d98b9d2c3897b7c4cfb24cffcca9: 2024-12-10T14:56:59,487 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9., pid=121, masterSystemTime=1733842619435 2024-12-10T14:56:59,488 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.,because compaction is disabled. 2024-12-10T14:56:59,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:56:59,489 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 
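The procedures above (pid=115 through pid=121) record the two parent regions being closed and flushed, their HFiles re-linked as "-top" references, and the merged region 7989d98b9d2c3897b7c4cfb24cffcca9 being opened. For context, a minimal, hypothetical sketch of the client call that drives such a forced merge through the HBase 2.x Admin API follows; the table and encoded region names are copied from the log lines above, while the class name and connection setup are illustrative and not part of the test output.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Encoded names of the two parent regions seen in the close/flush entries above.
      byte[][] regionsToMerge = new byte[][] {
          Bytes.toBytes("36745f7a1ea9bfbd1ff9249afef0a057"),
          Bytes.toBytes("91bd679bdab76a50ec1680281808a38a") };
      // forcible=true mirrors the "MergeTableRegionsProcedure ... force=true" entry
      // logged below; get() blocks until the master finishes the merge procedure.
      admin.mergeRegionsAsync(regionsToMerge, true).get();
    }
  }
}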
2024-12-10T14:56:59,490 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=7989d98b9d2c3897b7c4cfb24cffcca9, regionState=OPEN, openSeqNum=9, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:56:59,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-10T14:56:59,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9, server=db1d50717577,41775,1733842502887 in 208 msec 2024-12-10T14:56:59,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-10T14:56:59,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, ASSIGN in 363 msec 2024-12-10T14:56:59,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[36745f7a1ea9bfbd1ff9249afef0a057, 91bd679bdab76a50ec1680281808a38a], force=true in 693 msec 2024-12-10T14:56:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T14:56:59,916 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-10T14:56:59,917 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-10T14:56:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842619917 (current time:1733842619917). 
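Right after the merge completes, the master logs a snapshot request for snaptb0-testExportFileSystemStateWithMergeRegion-1 with type=FLUSH. Below is a minimal, hypothetical sketch of issuing that request through the Admin API, assuming the default flush-snapshot path for an enabled table; only the snapshot and table names come from the log, the rest is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a FLUSH-type snapshot, matching the
      // "type=FLUSH ttl=0" request logged by MasterRpcServices above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}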
2024-12-10T14:56:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:56:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-10T14:56:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:56:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560e5065 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a42dd38 2024-12-10T14:56:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1da91ef7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:59,925 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560e5065 to 127.0.0.1:57176 2024-12-10T14:56:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00bcce2a to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c0b7818 2024-12-10T14:56:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490083c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:56:59,931 DEBUG [hconnection-0x44e5654a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:59,932 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:56:59,934 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:56:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x00bcce2a to 127.0.0.1:57176 2024-12-10T14:56:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:56:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-10T14:56:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:56:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-10T14:56:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-10T14:56:59,938 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:56:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T14:56:59,939 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:56:59,941 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:56:59,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742121_1297 (size=216) 2024-12-10T14:56:59,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742121_1297 (size=216) 2024-12-10T14:56:59,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742121_1297 (size=216) 2024-12-10T14:56:59,951 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:56:59,951 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9}] 2024-12-10T14:56:59,952 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T14:57:00,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:00,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-10T14:57:00,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:57:00,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 7989d98b9d2c3897b7c4cfb24cffcca9: 2024-12-10T14:57:00,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-10T14:57:00,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:00,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/e3bb42a5b11343a98728dced90d827dd.36745f7a1ea9bfbd1ff9249afef0a057->hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd-top, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/eb80b8683dcc4438979a5d4ea75629e9.91bd679bdab76a50ec1680281808a38a->hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9-top] hfiles 2024-12-10T14:57:00,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/e3bb42a5b11343a98728dced90d827dd.36745f7a1ea9bfbd1ff9249afef0a057 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/eb80b8683dcc4438979a5d4ea75629e9.91bd679bdab76a50ec1680281808a38a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742122_1298 (size=269) 2024-12-10T14:57:00,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742122_1298 (size=269) 2024-12-10T14:57:00,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742122_1298 (size=269) 2024-12-10T14:57:00,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 
2024-12-10T14:57:00,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-10T14:57:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-10T14:57:00,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:00,117 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:00,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-10T14:57:00,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9 in 167 msec 2024-12-10T14:57:00,125 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:00,125 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:00,126 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:00,126 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,127 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742123_1299 (size=670) 2024-12-10T14:57:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742123_1299 (size=670) 2024-12-10T14:57:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742123_1299 (size=670) 2024-12-10T14:57:00,144 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:00,152 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:00,152 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,153 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:00,154 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-10T14:57:00,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 218 msec 2024-12-10T14:57:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T14:57:00,241 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-10T14:57:00,241 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241 2024-12-10T14:57:00,241 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:00,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:00,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,285 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:57:00,289 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742124_1300 (size=216) 2024-12-10T14:57:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742124_1300 (size=216) 2024-12-10T14:57:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742124_1300 (size=216) 2024-12-10T14:57:00,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742125_1301 (size=670) 2024-12-10T14:57:00,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742125_1301 (size=670) 2024-12-10T14:57:00,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742125_1301 (size=670) 2024-12-10T14:57:00,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:00,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:00,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:00,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:00,411 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0005_000001 (auth:SIMPLE) from 127.0.0.1:50548 2024-12-10T14:57:00,423 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000001/launch_container.sh] 2024-12-10T14:57:00,423 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000001/container_tokens] 2024-12-10T14:57:00,423 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0005/container_1733842510486_0005_01_000001/sysfs] 2024-12-10T14:57:00,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:57:01,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-14705483590926164325.jar 2024-12-10T14:57:01,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,470 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-6624618472185067805.jar 2024-12-10T14:57:01,471 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,471 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,471 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,471 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,472 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,472 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:01,472 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:57:01,472 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:57:01,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:57:01,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:57:01,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:57:01,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:57:01,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:57:01,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:57:01,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:57:01,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:57:01,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:57:01,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:57:01,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:01,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:01,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:01,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:01,476 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:01,476 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:01,476 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:01,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742126_1302 (size=6350861) 2024-12-10T14:57:01,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39383 is added to blk_1073742126_1302 (size=6350861) 2024-12-10T14:57:01,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742126_1302 (size=6350861) 2024-12-10T14:57:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742127_1303 (size=127628) 2024-12-10T14:57:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742127_1303 (size=127628) 2024-12-10T14:57:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742127_1303 (size=127628) 2024-12-10T14:57:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742128_1304 (size=2172101) 2024-12-10T14:57:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742128_1304 (size=2172101) 2024-12-10T14:57:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742128_1304 (size=2172101) 2024-12-10T14:57:01,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742129_1305 (size=213228) 2024-12-10T14:57:01,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742129_1305 (size=213228) 2024-12-10T14:57:01,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742129_1305 (size=213228) 2024-12-10T14:57:01,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742130_1306 (size=1877034) 2024-12-10T14:57:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742130_1306 (size=1877034) 2024-12-10T14:57:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742130_1306 (size=1877034) 2024-12-10T14:57:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742131_1307 (size=533455) 2024-12-10T14:57:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742131_1307 (size=533455) 2024-12-10T14:57:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742131_1307 (size=533455) 2024-12-10T14:57:01,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742132_1308 (size=7280644) 2024-12-10T14:57:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742132_1308 (size=7280644) 2024-12-10T14:57:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742132_1308 (size=7280644) 2024-12-10T14:57:01,665 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742133_1309 (size=4188619) 2024-12-10T14:57:01,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742133_1309 (size=4188619) 2024-12-10T14:57:01,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742133_1309 (size=4188619) 2024-12-10T14:57:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742134_1310 (size=20406) 2024-12-10T14:57:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742134_1310 (size=20406) 2024-12-10T14:57:01,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742134_1310 (size=20406) 2024-12-10T14:57:01,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742135_1311 (size=75495) 2024-12-10T14:57:01,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742135_1311 (size=75495) 2024-12-10T14:57:01,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742135_1311 (size=75495) 2024-12-10T14:57:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742136_1312 (size=45609) 2024-12-10T14:57:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742136_1312 (size=45609) 2024-12-10T14:57:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742136_1312 (size=45609) 2024-12-10T14:57:01,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742137_1313 (size=110084) 2024-12-10T14:57:01,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742137_1313 (size=110084) 2024-12-10T14:57:01,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742137_1313 (size=110084) 2024-12-10T14:57:01,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742138_1314 (size=1323991) 2024-12-10T14:57:01,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742138_1314 (size=1323991) 2024-12-10T14:57:01,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742138_1314 (size=1323991) 2024-12-10T14:57:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742139_1315 (size=23076) 2024-12-10T14:57:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742139_1315 (size=23076) 2024-12-10T14:57:01,712 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742139_1315 (size=23076) 2024-12-10T14:57:01,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742140_1316 (size=451756) 2024-12-10T14:57:01,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742140_1316 (size=451756) 2024-12-10T14:57:01,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742140_1316 (size=451756) 2024-12-10T14:57:01,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742141_1317 (size=126803) 2024-12-10T14:57:01,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742141_1317 (size=126803) 2024-12-10T14:57:01,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742141_1317 (size=126803) 2024-12-10T14:57:01,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742142_1318 (size=322274) 2024-12-10T14:57:01,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742142_1318 (size=322274) 2024-12-10T14:57:01,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742142_1318 (size=322274) 2024-12-10T14:57:01,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742143_1319 (size=1832290) 2024-12-10T14:57:01,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742143_1319 (size=1832290) 2024-12-10T14:57:01,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742143_1319 (size=1832290) 2024-12-10T14:57:01,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742144_1320 (size=30081) 2024-12-10T14:57:01,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742144_1320 (size=30081) 2024-12-10T14:57:01,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742144_1320 (size=30081) 2024-12-10T14:57:01,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742145_1321 (size=53616) 2024-12-10T14:57:01,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742145_1321 (size=53616) 2024-12-10T14:57:01,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742145_1321 (size=53616) 2024-12-10T14:57:01,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742146_1322 (size=29229) 2024-12-10T14:57:01,785 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742146_1322 (size=29229) 2024-12-10T14:57:01,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742146_1322 (size=29229) 2024-12-10T14:57:01,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742147_1323 (size=169089) 2024-12-10T14:57:01,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742147_1323 (size=169089) 2024-12-10T14:57:01,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742147_1323 (size=169089) 2024-12-10T14:57:01,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742148_1324 (size=5175431) 2024-12-10T14:57:01,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742148_1324 (size=5175431) 2024-12-10T14:57:01,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742148_1324 (size=5175431) 2024-12-10T14:57:01,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742149_1325 (size=136454) 2024-12-10T14:57:01,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742149_1325 (size=136454) 2024-12-10T14:57:01,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742149_1325 (size=136454) 2024-12-10T14:57:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742150_1326 (size=907858) 2024-12-10T14:57:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742150_1326 (size=907858) 2024-12-10T14:57:01,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742150_1326 (size=907858) 2024-12-10T14:57:01,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742151_1327 (size=3317408) 2024-12-10T14:57:01,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742151_1327 (size=3317408) 2024-12-10T14:57:01,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742151_1327 (size=3317408) 2024-12-10T14:57:01,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742152_1328 (size=503880) 2024-12-10T14:57:01,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742152_1328 (size=503880) 2024-12-10T14:57:01,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742152_1328 (size=503880) 2024-12-10T14:57:01,874 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742153_1329 (size=4695811) 2024-12-10T14:57:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742153_1329 (size=4695811) 2024-12-10T14:57:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742153_1329 (size=4695811) 2024-12-10T14:57:01,876 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-10T14:57:01,878 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-10T14:57:01,880 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-10T14:57:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742154_1330 (size=378) 2024-12-10T14:57:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742154_1330 (size=378) 2024-12-10T14:57:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742154_1330 (size=378) 2024-12-10T14:57:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742155_1331 (size=15) 2024-12-10T14:57:01,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742155_1331 (size=15) 2024-12-10T14:57:01,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742155_1331 (size=15) 2024-12-10T14:57:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742156_1332 (size=304942) 2024-12-10T14:57:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742156_1332 (size=304942) 2024-12-10T14:57:01,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742156_1332 (size=304942) 2024-12-10T14:57:01,936 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:57:01,936 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:57:02,001 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0006_000001 (auth:SIMPLE) from 127.0.0.1:45718 2024-12-10T14:57:02,048 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:02,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:02,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:02,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:02,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:02,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-10T14:57:05,974 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 54dee9bbb9571fe8ba05781cf679cda0 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:57:05,974 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ad6ff8dc223b4960571243a5d1c11885 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:57:07,636 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0006_000001 (auth:SIMPLE) from 127.0.0.1:55796 2024-12-10T14:57:07,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742157_1333 (size=350616) 2024-12-10T14:57:07,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742157_1333 (size=350616) 2024-12-10T14:57:07,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742157_1333 (size=350616) 2024-12-10T14:57:08,096 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:09,993 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0006_000001 (auth:SIMPLE) from 127.0.0.1:45728 2024-12-10T14:57:14,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742158_1334 (size=4945) 2024-12-10T14:57:14,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742158_1334 (size=4945) 2024-12-10T14:57:14,045 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742158_1334 (size=4945) 2024-12-10T14:57:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742159_1335 (size=4945) 2024-12-10T14:57:14,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742159_1335 (size=4945) 2024-12-10T14:57:14,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742159_1335 (size=4945) 2024-12-10T14:57:14,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742160_1336 (size=17474) 2024-12-10T14:57:14,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742160_1336 (size=17474) 2024-12-10T14:57:14,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742160_1336 (size=17474) 2024-12-10T14:57:14,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742161_1337 (size=482) 2024-12-10T14:57:14,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742161_1337 (size=482) 2024-12-10T14:57:14,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742161_1337 (size=482) 2024-12-10T14:57:14,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742162_1338 (size=17474) 2024-12-10T14:57:14,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742162_1338 (size=17474) 2024-12-10T14:57:14,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742162_1338 (size=17474) 2024-12-10T14:57:14,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742163_1339 (size=350616) 2024-12-10T14:57:14,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742163_1339 (size=350616) 2024-12-10T14:57:14,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742163_1339 (size=350616) 2024-12-10T14:57:14,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0006_000001 (auth:SIMPLE) from 127.0.0.1:46152 2024-12-10T14:57:14,280 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000002/launch_container.sh] 2024-12-10T14:57:14,280 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000002/container_tokens] 2024-12-10T14:57:14,280 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000002/sysfs] 2024-12-10T14:57:16,292 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:57:16,293 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-10T14:57:16,299 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,299 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:57:16,300 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:57:16,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-10T14:57:16,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-10T14:57:16,301 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,301 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-10T14:57:16,301 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842620241/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-10T14:57:16,306 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T14:57:16,310 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842636310"}]},"ts":"1733842636310"} 2024-12-10T14:57:16,312 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-10T14:57:16,314 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-10T14:57:16,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-10T14:57:16,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, UNASSIGN}] 2024-12-10T14:57:16,316 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, UNASSIGN 2024-12-10T14:57:16,317 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=7989d98b9d2c3897b7c4cfb24cffcca9, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:16,318 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:16,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T14:57:16,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:16,470 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 
7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:16,470 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:16,470 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 7989d98b9d2c3897b7c4cfb24cffcca9, disabling compactions & flushes 2024-12-10T14:57:16,470 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:57:16,470 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:57:16,470 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. after waiting 0 ms 2024-12-10T14:57:16,470 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 2024-12-10T14:57:16,475 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-10T14:57:16,476 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:16,476 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9. 
2024-12-10T14:57:16,476 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 7989d98b9d2c3897b7c4cfb24cffcca9: 2024-12-10T14:57:16,478 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:16,478 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=7989d98b9d2c3897b7c4cfb24cffcca9, regionState=CLOSED 2024-12-10T14:57:16,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-10T14:57:16,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 7989d98b9d2c3897b7c4cfb24cffcca9, server=db1d50717577,41775,1733842502887 in 162 msec 2024-12-10T14:57:16,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-10T14:57:16,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7989d98b9d2c3897b7c4cfb24cffcca9, UNASSIGN in 166 msec 2024-12-10T14:57:16,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-10T14:57:16,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 169 msec 2024-12-10T14:57:16,485 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842636485"}]},"ts":"1733842636485"} 2024-12-10T14:57:16,486 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-10T14:57:16,489 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-10T14:57:16,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 183 msec 2024-12-10T14:57:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T14:57:16,612 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-10T14:57:16,613 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,615 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,615 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,617 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,619 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:16,619 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a 2024-12-10T14:57:16,619 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:57:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-10T14:57:16,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-10T14:57:16,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-10T14:57:16,622 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/recovered.edits] 2024-12-10T14:57:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:16,623 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-10T14:57:16,623 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-10T14:57:16,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:57:16,624 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/recovered.edits] 2024-12-10T14:57:16,624 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:16,624 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:16,625 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:16,625 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:16,626 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/recovered.edits] 2024-12-10T14:57:16,629 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/cf/eb80b8683dcc4438979a5d4ea75629e9 2024-12-10T14:57:16,630 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/cf/e3bb42a5b11343a98728dced90d827dd 2024-12-10T14:57:16,632 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/eb80b8683dcc4438979a5d4ea75629e9.91bd679bdab76a50ec1680281808a38a to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/eb80b8683dcc4438979a5d4ea75629e9.91bd679bdab76a50ec1680281808a38a 2024-12-10T14:57:16,632 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/e3bb42a5b11343a98728dced90d827dd.36745f7a1ea9bfbd1ff9249afef0a057 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/cf/e3bb42a5b11343a98728dced90d827dd.36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:57:16,634 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/recovered.edits/8.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a/recovered.edits/8.seqid 2024-12-10T14:57:16,635 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/recovered.edits/8.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057/recovered.edits/8.seqid 2024-12-10T14:57:16,635 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/91bd679bdab76a50ec1680281808a38a 2024-12-10T14:57:16,636 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/36745f7a1ea9bfbd1ff9249afef0a057 2024-12-10T14:57:16,636 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/recovered.edits/12.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9/recovered.edits/12.seqid 2024-12-10T14:57:16,637 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7989d98b9d2c3897b7c4cfb24cffcca9 2024-12-10T14:57:16,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-10T14:57:16,639 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,642 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-10T14:57:16,644 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): 
Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-10T14:57:16,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-10T14:57:16,646 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842636646"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:16,651 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:57:16,652 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7989d98b9d2c3897b7c4cfb24cffcca9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:57:16,652 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-10T14:57:16,652 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842636652"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:16,656 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-10T14:57:16,661 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:16,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 48 msec 2024-12-10T14:57:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:57:16,725 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-10T14:57:16,726 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:16,726 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:16,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-10T14:57:16,729 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842636729"}]},"ts":"1733842636729"} 2024-12-10T14:57:16,731 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-10T14:57:16,740 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-10T14:57:16,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-10T14:57:16,743 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, UNASSIGN}] 2024-12-10T14:57:16,744 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, UNASSIGN 2024-12-10T14:57:16,744 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, UNASSIGN 2024-12-10T14:57:16,745 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=54dee9bbb9571fe8ba05781cf679cda0, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:16,745 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=ad6ff8dc223b4960571243a5d1c11885, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:16,747 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:16,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=131, state=RUNNABLE; CloseRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:57:16,748 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:16,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE; CloseRegionProcedure ad6ff8dc223b4960571243a5d1c11885, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:16,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-10T14:57:16,899 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:16,900 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] 
handler.UnassignRegionHandler(124): Close 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:57:16,900 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:16,900 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 54dee9bbb9571fe8ba05781cf679cda0, disabling compactions & flushes 2024-12-10T14:57:16,900 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:57:16,900 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:16,900 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:57:16,900 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. after waiting 0 ms 2024-12-10T14:57:16,900 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 2024-12-10T14:57:16,901 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:57:16,901 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:16,901 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing ad6ff8dc223b4960571243a5d1c11885, disabling compactions & flushes 2024-12-10T14:57:16,901 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:57:16,901 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:57:16,901 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. after waiting 0 ms 2024-12-10T14:57:16,901 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 
2024-12-10T14:57:16,909 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:16,909 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:16,909 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885. 2024-12-10T14:57:16,910 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for ad6ff8dc223b4960571243a5d1c11885: 2024-12-10T14:57:16,914 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=ad6ff8dc223b4960571243a5d1c11885, regionState=CLOSED 2024-12-10T14:57:16,917 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:57:16,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=132 2024-12-10T14:57:16,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=132, state=SUCCESS; CloseRegionProcedure ad6ff8dc223b4960571243a5d1c11885, server=db1d50717577,41775,1733842502887 in 168 msec 2024-12-10T14:57:16,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ad6ff8dc223b4960571243a5d1c11885, UNASSIGN in 176 msec 2024-12-10T14:57:16,926 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:16,927 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:16,927 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0. 
2024-12-10T14:57:16,927 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 54dee9bbb9571fe8ba05781cf679cda0: 2024-12-10T14:57:16,928 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:57:16,929 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=54dee9bbb9571fe8ba05781cf679cda0, regionState=CLOSED 2024-12-10T14:57:16,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=131 2024-12-10T14:57:16,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=131, state=SUCCESS; CloseRegionProcedure 54dee9bbb9571fe8ba05781cf679cda0, server=db1d50717577,37707,1733842502956 in 185 msec 2024-12-10T14:57:16,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-10T14:57:16,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=54dee9bbb9571fe8ba05781cf679cda0, UNASSIGN in 191 msec 2024-12-10T14:57:16,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-10T14:57:16,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 195 msec 2024-12-10T14:57:16,939 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842636939"}]},"ts":"1733842636939"} 2024-12-10T14:57:16,941 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-10T14:57:16,942 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-10T14:57:16,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 217 msec 2024-12-10T14:57:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-10T14:57:17,032 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-10T14:57:17,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,035 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,036 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,039 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,040 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:57:17,041 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:57:17,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-10T14:57:17,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-10T14:57:17,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-10T14:57:17,045 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf, FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/recovered.edits] 2024-12-10T14:57:17,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-10T14:57:17,045 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/recovered.edits] 2024-12-10T14:57:17,052 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/cf/7950909cecce407680c9226c2913b6e5 2024-12-10T14:57:17,053 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/cf/6208e827017f45509e36239a27948ef3 2024-12-10T14:57:17,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-10T14:57:17,060 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885/recovered.edits/9.seqid 2024-12-10T14:57:17,061 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0/recovered.edits/9.seqid 2024-12-10T14:57:17,061 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/ad6ff8dc223b4960571243a5d1c11885 2024-12-10T14:57:17,062 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithMergeRegion/54dee9bbb9571fe8ba05781cf679cda0 2024-12-10T14:57:17,062 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-10T14:57:17,064 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,067 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-10T14:57:17,070 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-10T14:57:17,071 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,071 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
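The DeleteTableProcedure steps above (HFileArchiver moving the two region directories to the archive tree, removing their rows and the table state from hbase:meta, dropping the descriptor) and the SnapshotManager deletions logged just after it correspond to the Admin calls sketched below. The table and snapshot names are taken from the log; the class and connection scaffolding are illustrative assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // deleteTable() runs the DeleteTableProcedure seen above: region dirs are archived,
            // the region and table-state rows are deleted from hbase:meta, and the descriptor
            // and ACL entries are removed.
            admin.deleteTable(table);
            // Each deleteSnapshot() call appears in the log as
            // "snapshot.SnapshotManager(380): Deleting snapshot: <name>".
            admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
            admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
            admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
        }
    }
}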
2024-12-10T14:57:17,072 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842637071"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:17,072 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842637071"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:17,074 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:57:17,074 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 54dee9bbb9571fe8ba05781cf679cda0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733842616828.54dee9bbb9571fe8ba05781cf679cda0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ad6ff8dc223b4960571243a5d1c11885, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733842616828.ad6ff8dc223b4960571243a5d1c11885.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:57:17,074 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-10T14:57:17,074 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842637074"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:17,076 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-10T14:57:17,081 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 48 msec 2024-12-10T14:57:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-10T14:57:17,161 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-10T14:57:17,173 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-10T14:57:17,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,178 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-10T14:57:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:17,181 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): 
Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-10T14:57:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:17,222 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=811 (was 797) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33879 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:33879 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1918550793_1 at /127.0.0.1:41862 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:41886 [Waiting for operation #6] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34295 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-28 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4738 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:55516 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3684) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:50324 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1918550793_1 at /127.0.0.1:55494 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=809 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=547 (was 551), ProcessCount=20 (was 20), AvailableMemoryMB=325 (was 1584) 2024-12-10T14:57:17,223 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-10T14:57:17,251 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=811, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=547, ProcessCount=20, AvailableMemoryMB=324 2024-12-10T14:57:17,251 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-10T14:57:17,253 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:57:17,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:17,256 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:57:17,256 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:17,256 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-10T14:57:17,257 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:57:17,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:57:17,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742164_1340 (size=407) 2024-12-10T14:57:17,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742164_1340 (size=407) 2024-12-10T14:57:17,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742164_1340 (size=407) 2024-12-10T14:57:17,269 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => deff4b68b658211e9f80a2e5cdd7e84e, NAME => 'testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:17,269 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d07239f99c0a8b2263359f08d82bc5f8, NAME => 'testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:17,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742165_1341 (size=68) 2024-12-10T14:57:17,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742165_1341 (size=68) 2024-12-10T14:57:17,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742165_1341 (size=68) 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] 
regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing d07239f99c0a8b2263359f08d82bc5f8, disabling compactions & flushes 2024-12-10T14:57:17,283 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. after waiting 0 ms 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,283 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,283 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for d07239f99c0a8b2263359f08d82bc5f8: 2024-12-10T14:57:17,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742166_1342 (size=68) 2024-12-10T14:57:17,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742166_1342 (size=68) 2024-12-10T14:57:17,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742166_1342 (size=68) 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing deff4b68b658211e9f80a2e5cdd7e84e, disabling compactions & flushes 2024-12-10T14:57:17,296 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 
after waiting 0 ms 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,296 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,296 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for deff4b68b658211e9f80a2e5cdd7e84e: 2024-12-10T14:57:17,300 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:57:17,300 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733842637300"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842637300"}]},"ts":"1733842637300"} 2024-12-10T14:57:17,300 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733842637300"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842637300"}]},"ts":"1733842637300"} 2024-12-10T14:57:17,302 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:57:17,303 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:57:17,304 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842637303"}]},"ts":"1733842637303"} 2024-12-10T14:57:17,305 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-10T14:57:17,309 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:57:17,311 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:57:17,311 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:57:17,311 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:57:17,311 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:57:17,311 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:57:17,311 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:57:17,311 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:57:17,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, 
region=deff4b68b658211e9f80a2e5cdd7e84e, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, ASSIGN}] 2024-12-10T14:57:17,313 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, ASSIGN 2024-12-10T14:57:17,313 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, ASSIGN 2024-12-10T14:57:17,314 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:57:17,314 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:57:17,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:57:17,464 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
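The CreateTableProcedure for testtb-testExportExpiredSnapshot above (filesystem layout written, two regions added to hbase:meta, balancer assignment of both regions) is the server-side half of a pre-split createTable call. A hedged client-side sketch, assuming only what the logged descriptor shows (a single 'cf' family with VERSIONS => '1' and one split key '1'); everything else is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
    public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)   // VERSIONS => '1' in the logged table descriptor
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // A single split key "1" yields the two regions assigned above:
            // ['', '1') -> deff4b68b658211e9f80a2e5cdd7e84e and ['1', '') -> d07239f99c0a8b2263359f08d82bc5f8.
            admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
    }
}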
2024-12-10T14:57:17,465 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=deff4b68b658211e9f80a2e5cdd7e84e, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:17,465 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=d07239f99c0a8b2263359f08d82bc5f8, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:17,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; OpenRegionProcedure d07239f99c0a8b2263359f08d82bc5f8, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:17,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=137, state=RUNNABLE; OpenRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:57:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:57:17,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:17,622 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:17,624 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,624 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => d07239f99c0a8b2263359f08d82bc5f8, NAME => 'testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:57:17,624 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. service=AccessControlService 2024-12-10T14:57:17,624 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,625 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => deff4b68b658211e9f80a2e5cdd7e84e, NAME => 'testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. service=AccessControlService 2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,625 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:57:17,625 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,626 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:17,626 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,626 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,627 INFO [StoreOpener-d07239f99c0a8b2263359f08d82bc5f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,628 INFO [StoreOpener-deff4b68b658211e9f80a2e5cdd7e84e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,631 INFO [StoreOpener-deff4b68b658211e9f80a2e5cdd7e84e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deff4b68b658211e9f80a2e5cdd7e84e columnFamilyName cf 2024-12-10T14:57:17,631 DEBUG [StoreOpener-deff4b68b658211e9f80a2e5cdd7e84e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:17,631 INFO [StoreOpener-deff4b68b658211e9f80a2e5cdd7e84e-1 {}] regionserver.HStore(327): Store=deff4b68b658211e9f80a2e5cdd7e84e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:17,632 INFO [StoreOpener-d07239f99c0a8b2263359f08d82bc5f8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d07239f99c0a8b2263359f08d82bc5f8 columnFamilyName cf 2024-12-10T14:57:17,632 DEBUG [StoreOpener-d07239f99c0a8b2263359f08d82bc5f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:17,633 INFO [StoreOpener-d07239f99c0a8b2263359f08d82bc5f8-1 {}] regionserver.HStore(327): Store=d07239f99c0a8b2263359f08d82bc5f8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:17,633 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,634 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,634 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,634 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,638 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,639 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:17,641 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:17,642 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened deff4b68b658211e9f80a2e5cdd7e84e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73009700, jitterRate=0.08792930841445923}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:17,642 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] 
wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:17,643 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for deff4b68b658211e9f80a2e5cdd7e84e: 2024-12-10T14:57:17,644 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e., pid=140, masterSystemTime=1733842637621 2024-12-10T14:57:17,644 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened d07239f99c0a8b2263359f08d82bc5f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74756528, jitterRate=0.11395907402038574}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:17,644 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for d07239f99c0a8b2263359f08d82bc5f8: 2024-12-10T14:57:17,645 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8., pid=139, masterSystemTime=1733842637619 2024-12-10T14:57:17,647 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,647 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:17,648 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=deff4b68b658211e9f80a2e5cdd7e84e, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:17,649 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:17,649 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 
2024-12-10T14:57:17,650 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=d07239f99c0a8b2263359f08d82bc5f8, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:17,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=137 2024-12-10T14:57:17,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=137, state=SUCCESS; OpenRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e, server=db1d50717577,37707,1733842502956 in 183 msec 2024-12-10T14:57:17,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-10T14:57:17,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; OpenRegionProcedure d07239f99c0a8b2263359f08d82bc5f8, server=db1d50717577,41775,1733842502887 in 185 msec 2024-12-10T14:57:17,655 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, ASSIGN in 342 msec 2024-12-10T14:57:17,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=136 2024-12-10T14:57:17,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, ASSIGN in 343 msec 2024-12-10T14:57:17,658 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:57:17,658 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842637658"}]},"ts":"1733842637658"} 2024-12-10T14:57:17,659 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-10T14:57:17,664 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:57:17,664 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-10T14:57:17,666 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-10T14:57:17,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:17,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:17,673 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:17,673 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:17,673 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:17,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 420 msec 2024-12-10T14:57:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:57:17,862 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-10T14:57:17,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-10T14:57:17,862 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:17,865 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-10T14:57:17,866 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:17,866 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-10T14:57:17,869 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-10T14:57:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842637869 (current time:1733842637869). 
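For reference, the records above show the master receiving a client snapshot request of type FLUSH for testtb-testExportExpiredSnapshot (snapshot emptySnaptb0-testExportExpiredSnapshot). The following is only an illustrative sketch of how such a request is typically issued through the public HBase Admin API, not an excerpt from the test: the table and snapshot names are taken from the log, while the configuration, class name, and everything else are assumptions.

```java
// Illustrative sketch (not part of the captured log): client-side FLUSH snapshot
// request like the one logged above, via the standard HBase 2.x client API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot asks each region to flush before the manifest is
      // written, which corresponds to the SnapshotProcedure states in the log.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}
```

The call blocks until the master-side SnapshotProcedure finishes, which is why the client in the log keeps polling "Checking to see if procedure is done" until the procedure completes.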
2024-12-10T14:57:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-10T14:57:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:17,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x281e4867 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20698883 2024-12-10T14:57:17,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b59af59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:17,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:17,907 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x281e4867 to 127.0.0.1:57176 2024-12-10T14:57:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:17,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f0fe21a to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@534e48e8 2024-12-10T14:57:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75cb16bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:17,967 DEBUG [hconnection-0x56fd3e19-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:17,968 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:17,970 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:17,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x3f0fe21a to 127.0.0.1:57176 2024-12-10T14:57:17,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:17,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-10T14:57:17,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:57:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-10T14:57:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-10T14:57:17,974 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:17,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-10T14:57:17,976 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:17,978 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-10T14:57:17,979 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742167_1343 (size=170) 2024-12-10T14:57:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742167_1343 (size=170) 2024-12-10T14:57:17,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742167_1343 (size=170) 2024-12-10T14:57:17,995 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:17,995 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e}, {pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8}] 2024-12-10T14:57:17,996 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:17,996 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-10T14:57:18,147 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:18,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:18,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-10T14:57:18,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for deff4b68b658211e9f80a2e5cdd7e84e: 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for d07239f99c0a8b2263359f08d82bc5f8: 2024-12-10T14:57:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:18,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742168_1344 (size=71) 2024-12-10T14:57:18,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742168_1344 (size=71) 2024-12-10T14:57:18,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742168_1344 (size=71) 2024-12-10T14:57:18,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 
2024-12-10T14:57:18,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-10T14:57:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-10T14:57:18,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,164 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 in 170 msec 2024-12-10T14:57:18,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742169_1345 (size=71) 2024-12-10T14:57:18,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742169_1345 (size=71) 2024-12-10T14:57:18,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742169_1345 (size=71) 2024-12-10T14:57:18,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 
2024-12-10T14:57:18,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-10T14:57:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-10T14:57:18,170 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:18,170 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:18,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-10T14:57:18,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e in 176 msec 2024-12-10T14:57:18,173 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:18,173 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:18,174 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:18,174 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,175 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742170_1346 (size=552) 2024-12-10T14:57:18,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742170_1346 (size=552) 2024-12-10T14:57:18,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742170_1346 (size=552) 2024-12-10T14:57:18,190 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:18,195 INFO 
[PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:18,196 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,197 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:18,197 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-10T14:57:18,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 225 msec 2024-12-10T14:57:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-10T14:57:18,277 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-10T14:57:18,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:18,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:18,297 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-10T14:57:18,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:18,298 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:18,310 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-10T14:57:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842638310 (current time:1733842638310). 
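The HRegion(8254) warnings above ("writing data ... with WAL disabled. Data may be lost in the event of a crash.") come from the test writing rows with the write-ahead log bypassed. Below is a minimal sketch of such a write through the standard client API, not taken from the test itself: only the table name and the cf:q column from the log are reused, and the row key and value are placeholders.

```java
// Illustrative sketch: a Put with the WAL bypassed, which is what triggers the
// HRegion(8254) "WAL disabled" warning seen in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                              // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      // SKIP_WAL trades durability for speed: a regionserver crash before the
      // memstore is flushed loses this edit, hence the warning in the log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```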
2024-12-10T14:57:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-10T14:57:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:18,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ad98c6a to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@25eb64de 2024-12-10T14:57:18,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238064f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:18,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:18,318 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35134, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:18,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ad98c6a to 127.0.0.1:57176 2024-12-10T14:57:18,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:18,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12579835 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c64c335 2024-12-10T14:57:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c63bd89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:18,340 DEBUG [hconnection-0x57f3e947-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:18,341 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:18,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:18,343 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:18,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x12579835 to 127.0.0.1:57176 2024-12-10T14:57:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-10T14:57:18,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:57:18,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-10T14:57:18,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-10T14:57:18,347 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:18,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:57:18,348 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:18,351 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:18,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742171_1347 (size=165) 2024-12-10T14:57:18,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742171_1347 (size=165) 2024-12-10T14:57:18,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742171_1347 (size=165) 2024-12-10T14:57:18,365 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:18,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
d07239f99c0a8b2263359f08d82bc5f8}] 2024-12-10T14:57:18,366 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:18,366 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:57:18,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:18,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:18,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-10T14:57:18,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-10T14:57:18,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:18,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 
2024-12-10T14:57:18,519 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing deff4b68b658211e9f80a2e5cdd7e84e 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-10T14:57:18,519 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing d07239f99c0a8b2263359f08d82bc5f8 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-10T14:57:18,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/.tmp/cf/002d36ac35fb4f8fae840cec94c520f8 is 71, key is 096d6abbcc1f16a3e717c9c2ad3e619f/cf:q/1733842638293/Put/seqid=0 2024-12-10T14:57:18,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/.tmp/cf/65c5e3c6b7c54a22828818526f234207 is 71, key is 1351b65ce8beb003b7c260444fa35098/cf:q/1733842638287/Put/seqid=0 2024-12-10T14:57:18,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742172_1348 (size=5424) 2024-12-10T14:57:18,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742172_1348 (size=5424) 2024-12-10T14:57:18,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742172_1348 (size=5424) 2024-12-10T14:57:18,551 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/.tmp/cf/002d36ac35fb4f8fae840cec94c520f8 2024-12-10T14:57:18,559 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/.tmp/cf/002d36ac35fb4f8fae840cec94c520f8 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8 2024-12-10T14:57:18,565 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8, entries=5, sequenceid=6, filesize=5.3 K 2024-12-10T14:57:18,566 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush 
of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for deff4b68b658211e9f80a2e5cdd7e84e in 47ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:18,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for deff4b68b658211e9f80a2e5cdd7e84e: 2024-12-10T14:57:18,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. for snaptb0-testExportExpiredSnapshot completed. 2024-12-10T14:57:18,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:18,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8] hfiles 2024-12-10T14:57:18,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742173_1349 (size=8188) 2024-12-10T14:57:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742173_1349 (size=8188) 2024-12-10T14:57:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742173_1349 (size=8188) 2024-12-10T14:57:18,581 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/.tmp/cf/65c5e3c6b7c54a22828818526f234207 2024-12-10T14:57:18,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742174_1350 (size=110) 2024-12-10T14:57:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742174_1350 (size=110) 2024-12-10T14:57:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742174_1350 (size=110) 2024-12-10T14:57:18,591 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/.tmp/cf/65c5e3c6b7c54a22828818526f234207 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207 2024-12-10T14:57:18,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:18,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-10T14:57:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-10T14:57:18,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:18,594 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:18,601 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207, entries=45, sequenceid=6, filesize=8.0 K 2024-12-10T14:57:18,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for d07239f99c0a8b2263359f08d82bc5f8 in 83ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for d07239f99c0a8b2263359f08d82bc5f8: 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. for snaptb0-testExportExpiredSnapshot completed. 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207] hfiles 2024-12-10T14:57:18,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e in 230 msec 2024-12-10T14:57:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742175_1351 (size=110) 2024-12-10T14:57:18,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742175_1351 (size=110) 2024-12-10T14:57:18,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742175_1351 (size=110) 2024-12-10T14:57:18,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 
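The records above trace the per-region work for snapshot snaptb0-testExportExpiredSnapshot: each region is flushed, its region-info is stored, and references to its hfiles are added to the snapshot manifest. Once the master consolidates and completes the snapshot (seen below), it becomes visible to clients. As a minimal sketch, assuming the HBase 2.x Admin API and using a name pattern chosen here only to match the log, a client could list it like this:

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // List snapshots whose names start with "snaptb0-", as in the log above.
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("snaptb0-.*"))) {
        System.out.println(sd.getName());
      }
    }
  }
}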
2024-12-10T14:57:18,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-10T14:57:18,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-10T14:57:18,625 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,625 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:18,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=144 2024-12-10T14:57:18,628 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:18,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure d07239f99c0a8b2263359f08d82bc5f8 in 261 msec 2024-12-10T14:57:18,629 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:18,629 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:18,630 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,630 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742176_1352 (size=630) 2024-12-10T14:57:18,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742176_1352 (size=630) 2024-12-10T14:57:18,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742176_1352 (size=630) 2024-12-10T14:57:18,646 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:18,650 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:57:18,652 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:18,653 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:18,654 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:18,654 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-10T14:57:18,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 308 msec 2024-12-10T14:57:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:57:18,952 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-10T14:57:18,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:57:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-10T14:57:18,956 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:57:18,956 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:18,956 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: 
namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-10T14:57:18,957 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:57:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T14:57:18,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742177_1353 (size=400) 2024-12-10T14:57:18,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742177_1353 (size=400) 2024-12-10T14:57:18,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742177_1353 (size=400) 2024-12-10T14:57:18,975 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3d3c5d37b59f3a27bd963f36e8591d10, NAME => 'testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:18,975 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 32558485cb37ace42cbc3671b13298ba, NAME => 'testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:19,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742178_1354 (size=61) 2024-12-10T14:57:19,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742178_1354 (size=61) 2024-12-10T14:57:19,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742178_1354 (size=61) 2024-12-10T14:57:19,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742179_1355 (size=61) 2024-12-10T14:57:19,007 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated 
testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:19,008 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 3d3c5d37b59f3a27bd963f36e8591d10, disabling compactions & flushes 2024-12-10T14:57:19,008 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,008 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,008 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. after waiting 0 ms 2024-12-10T14:57:19,008 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,008 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,008 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3d3c5d37b59f3a27bd963f36e8591d10: 2024-12-10T14:57:19,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742179_1355 (size=61) 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:19,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742179_1355 (size=61) 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 32558485cb37ace42cbc3671b13298ba, disabling compactions & flushes 2024-12-10T14:57:19,009 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. after waiting 0 ms 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 
2024-12-10T14:57:19,009 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:57:19,009 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 32558485cb37ace42cbc3671b13298ba: 2024-12-10T14:57:19,010 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:57:19,011 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733842639010"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842639010"}]},"ts":"1733842639010"} 2024-12-10T14:57:19,011 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733842639010"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842639010"}]},"ts":"1733842639010"} 2024-12-10T14:57:19,013 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:57:19,014 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:57:19,014 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842639014"}]},"ts":"1733842639014"} 2024-12-10T14:57:19,015 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-10T14:57:19,020 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:57:19,022 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:57:19,022 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:57:19,022 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:57:19,022 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:57:19,022 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:57:19,022 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:57:19,022 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:57:19,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3d3c5d37b59f3a27bd963f36e8591d10, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=32558485cb37ace42cbc3671b13298ba, ASSIGN}] 2024-12-10T14:57:19,023 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=32558485cb37ace42cbc3671b13298ba, ASSIGN 2024-12-10T14:57:19,023 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3d3c5d37b59f3a27bd963f36e8591d10, ASSIGN 2024-12-10T14:57:19,024 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3d3c5d37b59f3a27bd963f36e8591d10, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:57:19,024 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=32558485cb37ace42cbc3671b13298ba, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:57:19,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T14:57:19,174 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:57:19,175 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=32558485cb37ace42cbc3671b13298ba, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:19,175 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=3d3c5d37b59f3a27bd963f36e8591d10, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:19,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:19,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE; OpenRegionProcedure 32558485cb37ace42cbc3671b13298ba, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:57:19,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T14:57:19,330 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:19,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:19,333 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 
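The create request logged above (table testExportExpiredSnapshot, a single 'cf' family with VERSIONS => '1', and two regions split at key '1') corresponds roughly to the following client call. This is a minimal sketch assuming the HBase 2.x Admin API; connection setup is generic and only the names from the log are reused.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // One split key '1' yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}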
2024-12-10T14:57:19,333 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 32558485cb37ace42cbc3671b13298ba, NAME => 'testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:57:19,334 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. service=AccessControlService 2024-12-10T14:57:19,334 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:57:19,334 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,334 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:19,334 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,334 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,336 INFO [StoreOpener-32558485cb37ace42cbc3671b13298ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,338 INFO [StoreOpener-32558485cb37ace42cbc3671b13298ba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32558485cb37ace42cbc3671b13298ba columnFamilyName cf 2024-12-10T14:57:19,338 DEBUG [StoreOpener-32558485cb37ace42cbc3671b13298ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:19,339 INFO [StoreOpener-32558485cb37ace42cbc3671b13298ba-1 {}] regionserver.HStore(327): Store=32558485cb37ace42cbc3671b13298ba/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-10T14:57:19,339 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,340 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,343 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,343 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 3d3c5d37b59f3a27bd963f36e8591d10, NAME => 'testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:57:19,343 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,343 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. service=AccessControlService 2024-12-10T14:57:19,343 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:57:19,343 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,343 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:19,344 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,344 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,346 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:19,346 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 32558485cb37ace42cbc3671b13298ba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58850623, jitterRate=-0.12305738031864166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:19,347 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 32558485cb37ace42cbc3671b13298ba: 2024-12-10T14:57:19,348 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba., pid=151, masterSystemTime=1733842639330 2024-12-10T14:57:19,350 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:57:19,350 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 
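After both OpenRegionProcedures finish (below), each region's OPEN state and regionLocation are written to hbase:meta. A client can read those assignments back with RegionLocator; the following is a minimal sketch, assuming the HBase 2.x client API, that would print locations matching the db1d50717577,41775,... / db1d50717577,37707,... entries in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name and the server currently hosting it.
        System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
      }
    }
  }
}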
2024-12-10T14:57:19,350 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=32558485cb37ace42cbc3671b13298ba, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:19,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=149 2024-12-10T14:57:19,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=149, state=SUCCESS; OpenRegionProcedure 32558485cb37ace42cbc3671b13298ba, server=db1d50717577,37707,1733842502956 in 174 msec 2024-12-10T14:57:19,354 INFO [StoreOpener-3d3c5d37b59f3a27bd963f36e8591d10-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=32558485cb37ace42cbc3671b13298ba, ASSIGN in 331 msec 2024-12-10T14:57:19,355 INFO [StoreOpener-3d3c5d37b59f3a27bd963f36e8591d10-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3d3c5d37b59f3a27bd963f36e8591d10 columnFamilyName cf 2024-12-10T14:57:19,355 DEBUG [StoreOpener-3d3c5d37b59f3a27bd963f36e8591d10-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:19,356 INFO [StoreOpener-3d3c5d37b59f3a27bd963f36e8591d10-1 {}] regionserver.HStore(327): Store=3d3c5d37b59f3a27bd963f36e8591d10/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:19,357 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,357 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,359 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,361 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:19,362 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 3d3c5d37b59f3a27bd963f36e8591d10; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72510966, jitterRate=0.08049759268760681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:19,362 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 3d3c5d37b59f3a27bd963f36e8591d10: 2024-12-10T14:57:19,363 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10., pid=150, masterSystemTime=1733842639330 2024-12-10T14:57:19,365 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,365 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,365 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=3d3c5d37b59f3a27bd963f36e8591d10, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:19,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148 2024-12-10T14:57:19,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10, server=db1d50717577,41775,1733842502887 in 190 msec 2024-12-10T14:57:19,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-10T14:57:19,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3d3c5d37b59f3a27bd963f36e8591d10, ASSIGN in 348 msec 2024-12-10T14:57:19,372 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:57:19,372 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842639372"}]},"ts":"1733842639372"} 2024-12-10T14:57:19,374 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-10T14:57:19,376 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:57:19,377 DEBUG [PEWorker-5 {}] 
access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-10T14:57:19,379 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-10T14:57:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:19,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:19,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 428 msec 2024-12-10T14:57:19,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T14:57:19,562 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-10T14:57:19,562 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-10T14:57:19,563 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:19,566 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-10T14:57:19,567 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:19,567 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-10T14:57:19,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:19,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:19,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-10T14:57:19,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 
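The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what the region server logs when a client issues puts with Durability.SKIP_WAL. A minimal client-side sketch, assuming the HBase 2.x client API; the row, qualifier, and value below are illustrative, not taken from the test data:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      put.setDurability(Durability.SKIP_WAL);   // skips the WAL; triggers the logged warning
      table.put(put);
    }
  }
}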
2024-12-10T14:57:19,578 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:19,587 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-10T14:57:19,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-10T14:57:19,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:19,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09169249 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@585d0d24 2024-12-10T14:57:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0ad287, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:19,596 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:19,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09169249 to 127.0.0.1:57176 2024-12-10T14:57:19,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26707d02 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167460c5 2024-12-10T14:57:19,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b971666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:19,605 DEBUG [hconnection-0x390bb59c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:19,606 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35152, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:19,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:19,608 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46456, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:19,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26707d02 to 127.0.0.1:57176 2024-12-10T14:57:19,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:19,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-10T14:57:19,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:57:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-10T14:57:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-10T14:57:19,611 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:19,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-10T14:57:19,612 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:19,614 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:19,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742180_1356 (size=152) 2024-12-10T14:57:19,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742180_1356 (size=152) 2024-12-10T14:57:19,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742180_1356 (size=152) 2024-12-10T14:57:19,621 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:19,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, 
ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 32558485cb37ace42cbc3671b13298ba}] 2024-12-10T14:57:19,622 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,622 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-10T14:57:19,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:19,773 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:19,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-10T14:57:19,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-10T14:57:19,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:57:19,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 
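The snapshot request driving pid=152 above ({ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }) maps to an Admin.snapshot call on the client side. The sketch below assumes the HBase 2.x Admin API; for an enabled table this takes a FLUSH-type snapshot, matching the type in the log. The ttl=10 is supplied by the test as a snapshot property and is deliberately omitted here, since the exact property-map API varies by version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Takes a FLUSH-type snapshot of the enabled table.
      admin.snapshot("snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"));
    }
  }
}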
2024-12-10T14:57:19,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 3d3c5d37b59f3a27bd963f36e8591d10 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-10T14:57:19,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 32558485cb37ace42cbc3671b13298ba 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-10T14:57:19,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/.tmp/cf/f2cefe05e813402c9a40970b12548028 is 71, key is 03d2a2ba6220db3de57aee03e12ce736/cf:q/1733842639574/Put/seqid=0 2024-12-10T14:57:19,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/.tmp/cf/9486f807c1d040e6be764b3d98f762c2 is 71, key is 21b4c9eb22063120d033a3a62f8a9f5f/cf:q/1733842639575/Put/seqid=0 2024-12-10T14:57:19,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742182_1358 (size=5216) 2024-12-10T14:57:19,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742182_1358 (size=5216) 2024-12-10T14:57:19,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742182_1358 (size=5216) 2024-12-10T14:57:19,803 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/.tmp/cf/f2cefe05e813402c9a40970b12548028 2024-12-10T14:57:19,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/.tmp/cf/f2cefe05e813402c9a40970b12548028 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/cf/f2cefe05e813402c9a40970b12548028 2024-12-10T14:57:19,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742181_1357 (size=8394) 2024-12-10T14:57:19,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742181_1357 (size=8394) 2024-12-10T14:57:19,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742181_1357 (size=8394) 2024-12-10T14:57:19,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/.tmp/cf/9486f807c1d040e6be764b3d98f762c2 2024-12-10T14:57:19,813 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/cf/f2cefe05e813402c9a40970b12548028, entries=2, sequenceid=5, filesize=5.1 K 2024-12-10T14:57:19,814 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 3d3c5d37b59f3a27bd963f36e8591d10 in 40ms, sequenceid=5, compaction requested=false 2024-12-10T14:57:19,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 3d3c5d37b59f3a27bd963f36e8591d10: 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. for snapshot-testExportExpiredSnapshot completed. 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/cf/f2cefe05e813402c9a40970b12548028] hfiles 2024-12-10T14:57:19,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/cf/f2cefe05e813402c9a40970b12548028 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/.tmp/cf/9486f807c1d040e6be764b3d98f762c2 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/cf/9486f807c1d040e6be764b3d98f762c2 2024-12-10T14:57:19,821 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/cf/9486f807c1d040e6be764b3d98f762c2, entries=48, sequenceid=5, filesize=8.2 K 2024-12-10T14:57:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742183_1359 (size=103) 2024-12-10T14:57:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742183_1359 (size=103) 2024-12-10T14:57:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742183_1359 (size=103) 2024-12-10T14:57:19,823 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 32558485cb37ace42cbc3671b13298ba in 49ms, sequenceid=5, compaction requested=false 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 
2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 32558485cb37ace42cbc3671b13298ba: 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. for snapshot-testExportExpiredSnapshot completed. 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/cf/9486f807c1d040e6be764b3d98f762c2] hfiles 2024-12-10T14:57:19,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/cf/9486f807c1d040e6be764b3d98f762c2 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-10T14:57:19,824 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,824 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:57:19,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 3d3c5d37b59f3a27bd963f36e8591d10 in 203 msec 2024-12-10T14:57:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742184_1360 (size=103) 2024-12-10T14:57:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742184_1360 (size=103) 2024-12-10T14:57:19,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742184_1360 (size=103) 2024-12-10T14:57:19,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:57:19,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-10T14:57:19,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-10T14:57:19,832 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,832 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:57:19,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-10T14:57:19,834 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:19,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 32558485cb37ace42cbc3671b13298ba in 212 msec 2024-12-10T14:57:19,835 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:19,835 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:19,835 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,836 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742185_1361 (size=609) 2024-12-10T14:57:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742185_1361 (size=609) 2024-12-10T14:57:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742185_1361 (size=609) 2024-12-10T14:57:19,846 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-12-10T14:57:19,850 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:19,850 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-10T14:57:19,851 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:19,851 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-10T14:57:19,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 241 msec 2024-12-10T14:57:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-10T14:57:19,914 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-10T14:57:20,358 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0006_000001 (auth:SIMPLE) from 127.0.0.1:46156 2024-12-10T14:57:20,369 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000001/launch_container.sh] 2024-12-10T14:57:20,370 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000001/container_tokens] 2024-12-10T14:57:20,370 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0006/container_1733842510486_0006_01_000001/sysfs] 2024-12-10T14:57:22,002 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:22,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-10T14:57:22,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-10T14:57:22,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-10T14:57:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-10T14:57:28,095 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:29,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842649921 2024-12-10T14:57:29,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842649921, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842649921, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:29,950 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:29,950 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842649921, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842649921/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-10T14:57:29,953 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-10T14:57:29,954 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:57:29,955 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot
2024-12-10T14:57:29,956 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot
2024-12-10T14:57:29,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-10T14:57:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-10T14:57:29,958 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842649958"}]},"ts":"1733842649958"}
2024-12-10T14:57:29,959 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta
2024-12-10T14:57:29,961 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING
2024-12-10T14:57:29,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}]
2024-12-10T14:57:29,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, UNASSIGN}]
2024-12-10T14:57:29,963 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, UNASSIGN
2024-12-10T14:57:29,963 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, UNASSIGN
2024-12-10T14:57:29,964 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=d07239f99c0a8b2263359f08d82bc5f8, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887
2024-12-10T14:57:29,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=deff4b68b658211e9f80a2e5cdd7e84e, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956
2024-12-10T14:57:29,965 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-10T14:57:29,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure d07239f99c0a8b2263359f08d82bc5f8, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:29,965 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:29,965 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:57:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:57:30,116 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:30,117 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:30,117 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:30,117 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing d07239f99c0a8b2263359f08d82bc5f8, disabling compactions & flushes 2024-12-10T14:57:30,117 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:30,117 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 2024-12-10T14:57:30,117 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. after waiting 0 ms 2024-12-10T14:57:30,117 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 
2024-12-10T14:57:30,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:30,117 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:30,118 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:30,118 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing deff4b68b658211e9f80a2e5cdd7e84e, disabling compactions & flushes 2024-12-10T14:57:30,118 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:30,118 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:30,118 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. after waiting 0 ms 2024-12-10T14:57:30,118 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:30,122 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8. 
2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for d07239f99c0a8b2263359f08d82bc5f8: 2024-12-10T14:57:30,122 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e. 2024-12-10T14:57:30,122 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for deff4b68b658211e9f80a2e5cdd7e84e: 2024-12-10T14:57:30,124 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:30,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=deff4b68b658211e9f80a2e5cdd7e84e, regionState=CLOSED 2024-12-10T14:57:30,124 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:30,125 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=d07239f99c0a8b2263359f08d82bc5f8, regionState=CLOSED 2024-12-10T14:57:30,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-10T14:57:30,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure deff4b68b658211e9f80a2e5cdd7e84e, server=db1d50717577,37707,1733842502956 in 160 msec 2024-12-10T14:57:30,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-10T14:57:30,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure d07239f99c0a8b2263359f08d82bc5f8, server=db1d50717577,41775,1733842502887 in 161 msec 2024-12-10T14:57:30,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=deff4b68b658211e9f80a2e5cdd7e84e, UNASSIGN in 165 msec 2024-12-10T14:57:30,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-12-10T14:57:30,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d07239f99c0a8b2263359f08d82bc5f8, UNASSIGN in 165 msec 2024-12-10T14:57:30,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-10T14:57:30,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 168 msec 2024-12-10T14:57:30,131 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842650131"}]},"ts":"1733842650131"} 2024-12-10T14:57:30,132 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-10T14:57:30,133 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-10T14:57:30,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-12-10T14:57:30,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:57:30,260 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-10T14:57:30,260 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,262 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,262 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,263 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,265 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:30,265 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:30,267 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/recovered.edits] 2024-12-10T14:57:30,267 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/recovered.edits] 2024-12-10T14:57:30,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-10T14:57:30,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-10T14:57:30,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,270 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 
2024-12-10T14:57:30,270 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-10T14:57:30,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:57:30,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,272 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/cf/002d36ac35fb4f8fae840cec94c520f8 2024-12-10T14:57:30,272 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/cf/65c5e3c6b7c54a22828818526f234207 2024-12-10T14:57:30,275 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e/recovered.edits/9.seqid 2024-12-10T14:57:30,275 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8/recovered.edits/9.seqid 2024-12-10T14:57:30,275 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/deff4b68b658211e9f80a2e5cdd7e84e 2024-12-10T14:57:30,275 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportExpiredSnapshot/d07239f99c0a8b2263359f08d82bc5f8 2024-12-10T14:57:30,275 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-10T14:57:30,277 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,279 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-10T14:57:30,282 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-10T14:57:30,283 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,283 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-10T14:57:30,283 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842650283"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:30,283 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842650283"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:30,285 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:57:30,285 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => deff4b68b658211e9f80a2e5cdd7e84e, NAME => 'testtb-testExportExpiredSnapshot,,1733842637253.deff4b68b658211e9f80a2e5cdd7e84e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d07239f99c0a8b2263359f08d82bc5f8, NAME => 'testtb-testExportExpiredSnapshot,1,1733842637253.d07239f99c0a8b2263359f08d82bc5f8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:57:30,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-10T14:57:30,285 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842650285"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:30,286 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-10T14:57:30,288 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-10T14:57:30,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 27 msec 2024-12-10T14:57:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:57:30,372 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-10T14:57:30,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-10T14:57:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-10T14:57:30,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-10T14:57:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-10T14:57:30,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-10T14:57:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-10T14:57:30,405 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=801 (was 811), OpenFileDescriptor=795 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=463 (was 547), ProcessCount=11 (was 20), AvailableMemoryMB=1423 (was 324) - AvailableMemoryMB LEAK? 
- 2024-12-10T14:57:30,405 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-10T14:57:30,423 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=801, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=463, ProcessCount=11, AvailableMemoryMB=1422 2024-12-10T14:57:30,423 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-10T14:57:30,425 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:57:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:30,426 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:57:30,426 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:30,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-10T14:57:30,427 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:57:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-10T14:57:30,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742186_1362 (size=412) 2024-12-10T14:57:30,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742186_1362 (size=412) 2024-12-10T14:57:30,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742186_1362 (size=412) 2024-12-10T14:57:30,435 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2e4a0ab714e6ae8babc774cb4dc9d26d, NAME => 'testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:30,435 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => dfddc49fb9e56046b2f7ae9bd1f21878, NAME => 'testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:30,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742187_1363 (size=73) 2024-12-10T14:57:30,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742188_1364 (size=73) 2024-12-10T14:57:30,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742187_1363 (size=73) 2024-12-10T14:57:30,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742187_1363 (size=73) 2024-12-10T14:57:30,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742188_1364 (size=73) 2024-12-10T14:57:30,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742188_1364 (size=73) 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 2e4a0ab714e6ae8babc774cb4dc9d26d, disabling compactions & flushes 2024-12-10T14:57:30,451 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 
after waiting 0 ms 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,451 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,451 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2e4a0ab714e6ae8babc774cb4dc9d26d: 2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing dfddc49fb9e56046b2f7ae9bd1f21878, disabling compactions & flushes 2024-12-10T14:57:30,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. after waiting 0 ms 2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:30,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 
2024-12-10T14:57:30,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for dfddc49fb9e56046b2f7ae9bd1f21878: 2024-12-10T14:57:30,453 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:57:30,453 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733842650453"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842650453"}]},"ts":"1733842650453"} 2024-12-10T14:57:30,453 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733842650453"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842650453"}]},"ts":"1733842650453"} 2024-12-10T14:57:30,455 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:57:30,456 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:57:30,456 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842650456"}]},"ts":"1733842650456"} 2024-12-10T14:57:30,457 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-10T14:57:30,461 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:57:30,462 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:57:30,462 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:57:30,462 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:57:30,462 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:57:30,462 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:57:30,462 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:57:30,462 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:57:30,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, ASSIGN}] 2024-12-10T14:57:30,464 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, ASSIGN 2024-12-10T14:57:30,464 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, ASSIGN 2024-12-10T14:57:30,464 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 2024-12-10T14:57:30,464 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:57:30,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-10T14:57:30,614 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:57:30,615 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=dfddc49fb9e56046b2f7ae9bd1f21878, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:30,615 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=2e4a0ab714e6ae8babc774cb4dc9d26d, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:57:30,616 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=163, state=RUNNABLE; OpenRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:57:30,617 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-10T14:57:30,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:30,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:30,771 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 
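Editor's note: the assignment procedures above move both regions to OPENING and dispatch OpenRegionProcedures to the region servers at ports 41775 and 45255. From a client, the resulting placement recorded in hbase:meta can be inspected with a RegionLocator; a rough sketch (placeholder quorum, not the test harness's own wait loop) follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionPlacement {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    TableName name = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(name)) {
      // Each entry pairs a region (encoded name, start/end keys) with the
      // server currently hosting it, i.e. the locations written to hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}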
2024-12-10T14:57:30,771 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => dfddc49fb9e56046b2f7ae9bd1f21878, NAME => 'testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:57:30,771 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,771 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. service=AccessControlService 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 2e4a0ab714e6ae8babc774cb4dc9d26d, NAME => 'testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:57:30,772 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. service=AccessControlService 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:30,772 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
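Editor's note: the "Registered coprocessor service ... AccessControlService" lines show each region loading the AccessController system coprocessor as it opens. In a deployment this is normally switched on through configuration rather than code; the sketch below expresses the usual hbase-site.xml keys programmatically (key names as documented for HBase 2.x; treat this as illustrative and adjust to the cluster's actual security setup).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Load the AccessController on the master, the region servers and every
    // region, which is what produces the "Registered coprocessor service ...
    // AccessControlService" lines when a region opens.
    String acl = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", acl);
    conf.set("hbase.coprocessor.region.classes", acl);
    conf.set("hbase.coprocessor.regionserver.classes", acl);
    // ACLs are only enforced when authorization is enabled.
    conf.setBoolean("hbase.security.authorization", true);
    System.out.println("region coprocessors: " + conf.get("hbase.coprocessor.region.classes"));
  }
}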
2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,772 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,773 INFO [StoreOpener-dfddc49fb9e56046b2f7ae9bd1f21878-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,773 INFO [StoreOpener-2e4a0ab714e6ae8babc774cb4dc9d26d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,774 INFO [StoreOpener-dfddc49fb9e56046b2f7ae9bd1f21878-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dfddc49fb9e56046b2f7ae9bd1f21878 columnFamilyName cf 2024-12-10T14:57:30,774 INFO [StoreOpener-2e4a0ab714e6ae8babc774cb4dc9d26d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2e4a0ab714e6ae8babc774cb4dc9d26d columnFamilyName cf 2024-12-10T14:57:30,775 DEBUG [StoreOpener-2e4a0ab714e6ae8babc774cb4dc9d26d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:30,775 DEBUG [StoreOpener-dfddc49fb9e56046b2f7ae9bd1f21878-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:30,775 INFO [StoreOpener-dfddc49fb9e56046b2f7ae9bd1f21878-1 {}] regionserver.HStore(327): Store=dfddc49fb9e56046b2f7ae9bd1f21878/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:30,775 INFO [StoreOpener-2e4a0ab714e6ae8babc774cb4dc9d26d-1 {}] regionserver.HStore(327): Store=2e4a0ab714e6ae8babc774cb4dc9d26d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:30,776 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,776 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,776 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,776 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:30,778 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:30,780 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:30,780 DEBUG 
[RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:30,780 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened dfddc49fb9e56046b2f7ae9bd1f21878; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68577306, jitterRate=0.02188149094581604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:30,780 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 2e4a0ab714e6ae8babc774cb4dc9d26d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64360719, jitterRate=-0.04095055162906647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:30,781 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for dfddc49fb9e56046b2f7ae9bd1f21878: 2024-12-10T14:57:30,781 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 2e4a0ab714e6ae8babc774cb4dc9d26d: 2024-12-10T14:57:30,782 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d., pid=165, masterSystemTime=1733842650768 2024-12-10T14:57:30,782 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878., pid=166, masterSystemTime=1733842650768 2024-12-10T14:57:30,783 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:30,783 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:30,784 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=dfddc49fb9e56046b2f7ae9bd1f21878, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:30,784 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:30,784 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 
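Editor's note: when the regions open, the log prints the effective split policy (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy with a jittered desiredMaxFileSize) and the flush policy. The split policy is selectable per table; the sketch below shows one way to override it on the table descriptor (class name and the setRegionSplitPolicyClassName/setMaxFileSize builder methods as in HBase 2.x; this is illustrative, not what the test configures).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicySketch {
  public static void main(String[] args) {
    // Pin the table to a constant-size split policy instead of the default
    // stepping/increasing-to-upper-bound behaviour reported in the log.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
        .setMaxFileSize(256L * 1024 * 1024) // split threshold used by that policy
        .build();
    System.out.println(desc);
  }
}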
2024-12-10T14:57:30,784 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=2e4a0ab714e6ae8babc774cb4dc9d26d, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:57:30,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-10T14:57:30,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878, server=db1d50717577,41775,1733842502887 in 168 msec 2024-12-10T14:57:30,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=163 2024-12-10T14:57:30,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, ASSIGN in 324 msec 2024-12-10T14:57:30,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=163, state=SUCCESS; OpenRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d, server=db1d50717577,45255,1733842503015 in 170 msec 2024-12-10T14:57:30,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-10T14:57:30,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, ASSIGN in 325 msec 2024-12-10T14:57:30,790 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:57:30,790 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842650790"}]},"ts":"1733842650790"} 2024-12-10T14:57:30,791 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-10T14:57:30,793 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:57:30,793 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-10T14:57:30,795 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:57:30,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:30,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 380 msec 2024-12-10T14:57:30,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
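Editor's note: the PermissionStorage and ZKPermissionWatcher lines record the table's ACL entry ("jenkins: RWXCA") being written and fanned out to every region server via the /hbase/acl znode. Granting such an entry from a client normally goes through AccessControlClient; a hedged sketch follows (placeholder connection settings, grantee name simply mirroring the log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAcl {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (family and qualifier left null), matching the entry in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}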
2024-12-10T14:57:31,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-10T14:57:31,030 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-10T14:57:31,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-10T14:57:31,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:31,033 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-10T14:57:31,033 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:31,034 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-10T14:57:31,036 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:57:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842651036 (current time:1733842651036). 2024-12-10T14:57:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-10T14:57:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:31,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11946abf to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20bb7a7e 2024-12-10T14:57:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bbb292, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:31,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,042 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11946abf to 127.0.0.1:57176 2024-12-10T14:57:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 
{}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38793860 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@436acd9b 2024-12-10T14:57:31,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a1cd846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:31,048 DEBUG [hconnection-0x431f9b03-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,049 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,051 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38793860 to 127.0.0.1:57176 2024-12-10T14:57:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:57:31,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
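Editor's note: the handler on port 36533 is servicing a snapshot RPC for "emptySnaptb0-testEmptyExportFileSystemState" of type FLUSH, which the next lines turn into SnapshotProcedure pid=167. The client-side call that triggers this is a single Admin method; a sketch under the usual placeholder assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of an online table: the master registers a
      // SnapshotProcedure and fans out SnapshotRegionProcedures, as in the log.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}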
2024-12-10T14:57:31,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:57:31,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-10T14:57:31,054 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:31,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:57:31,055 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:31,057 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742189_1365 (size=185) 2024-12-10T14:57:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742189_1365 (size=185) 2024-12-10T14:57:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742189_1365 (size=185) 2024-12-10T14:57:31,063 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:31,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878}] 2024-12-10T14:57:31,064 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,064 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,155 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:57:31,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:31,215 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:31,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-10T14:57:31,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-10T14:57:31,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:31,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:31,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for dfddc49fb9e56046b2f7ae9bd1f21878: 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 2e4a0ab714e6ae8babc774cb4dc9d26d: 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:31,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742190_1366 (size=76) 2024-12-10T14:57:31,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742191_1367 (size=76) 2024-12-10T14:57:31,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742191_1367 (size=76) 2024-12-10T14:57:31,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742191_1367 (size=76) 2024-12-10T14:57:31,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742190_1366 (size=76) 2024-12-10T14:57:31,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:31,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-10T14:57:31,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742190_1366 (size=76) 2024-12-10T14:57:31,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-10T14:57:31,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 
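Editor's note: because nothing has been written to the table yet, each SnapshotRegionProcedure stores only region-info and "references for [] hfiles", i.e. an empty manifest per region. Once the snapshot procedure completes (next lines), it can be confirmed from a client by listing snapshots; a small sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists completed snapshots; a freshly taken empty snapshot shows up
      // here even though its manifests reference no HFiles.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " (" + sd.getType() + ")");
      }
    }
  }
}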
2024-12-10T14:57:31,224 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-10T14:57:31,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-10T14:57:31,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,225 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 in 162 msec 2024-12-10T14:57:31,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-10T14:57:31,227 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:31,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d in 162 msec 2024-12-10T14:57:31,227 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:31,228 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:31,228 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,229 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742192_1368 (size=567) 2024-12-10T14:57:31,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742192_1368 (size=567) 2024-12-10T14:57:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is 
added to blk_1073742192_1368 (size=567) 2024-12-10T14:57:31,238 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:31,242 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:31,242 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,244 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:31,244 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-10T14:57:31,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 191 msec 2024-12-10T14:57:31,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:57:31,356 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-10T14:57:31,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:31,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:31,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-10T14:57:31,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 
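Editor's note: the two "writing data to region ... with WAL disabled" lines come from the test loading rows with durability turned off, which is faster but loses data on a crash, hence the warning text. A sketch of a put issued that way is below (row key and value are hypothetical; the durability setting is the public-API switch that produces this message).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0001")); // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), // family/qualifier as in the log
          Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the "Data may be lost in the event of a
      // crash" message on the region server.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}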
2024-12-10T14:57:31,366 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:31,375 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:57:31,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842651375 (current time:1733842651375). 2024-12-10T14:57:31,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:31,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-10T14:57:31,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:31,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5be536aa to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@641d7956 2024-12-10T14:57:31,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76d2692c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:31,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,380 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5be536aa to 127.0.0.1:57176 2024-12-10T14:57:31,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:31,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8f5887 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14d74569 2024-12-10T14:57:31,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a9c5d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:31,389 DEBUG [hconnection-0x731dee0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,390 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:57418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:31,391 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8f5887 to 127.0.0.1:57176 2024-12-10T14:57:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-10T14:57:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:57:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-10T14:57:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-10T14:57:31,394 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-10T14:57:31,395 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:31,397 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:31,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742193_1369 (size=180) 2024-12-10T14:57:31,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742193_1369 (size=180) 2024-12-10T14:57:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is 
added to blk_1073742193_1369 (size=180) 2024-12-10T14:57:31,405 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:31,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878}] 2024-12-10T14:57:31,406 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,406 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-10T14:57:31,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:31,557 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:31,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-10T14:57:31,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-10T14:57:31,558 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:31,558 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 
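Editor's note: unlike the empty snapshot, "snaptb0-testEmptyExportFileSystemState" runs after data was written, so the next lines show each region flushing its memstore (199 B and about 3 KB) to new HFiles under .tmp/cf before the snapshot references them. The same memstore-to-HFile flush can also be forced explicitly from a client; a sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table, producing store files like the
      // .tmp/cf/... HFiles written during the FLUSH snapshot above.
      admin.flush(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}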
2024-12-10T14:57:31,558 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 2e4a0ab714e6ae8babc774cb4dc9d26d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-10T14:57:31,558 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing dfddc49fb9e56046b2f7ae9bd1f21878 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-10T14:57:31,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/.tmp/cf/d6835cf58de340ec8c457db6b7e4ddb7 is 71, key is 022f2d39fbc47e6d37880769c86c67ec/cf:q/1733842651362/Put/seqid=0 2024-12-10T14:57:31,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/.tmp/cf/4f244166532a4f98997a29b82bac9ab5 is 71, key is 130df4b0f0e80f4790dda1c5d5487e15/cf:q/1733842651363/Put/seqid=0 2024-12-10T14:57:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742194_1370 (size=5288) 2024-12-10T14:57:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742194_1370 (size=5288) 2024-12-10T14:57:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742195_1371 (size=8326) 2024-12-10T14:57:31,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/.tmp/cf/d6835cf58de340ec8c457db6b7e4ddb7 2024-12-10T14:57:31,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742195_1371 (size=8326) 2024-12-10T14:57:31,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742194_1370 (size=5288) 2024-12-10T14:57:31,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742195_1371 (size=8326) 2024-12-10T14:57:31,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/.tmp/cf/4f244166532a4f98997a29b82bac9ab5 2024-12-10T14:57:31,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/.tmp/cf/4f244166532a4f98997a29b82bac9ab5 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5 2024-12-10T14:57:31,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/.tmp/cf/d6835cf58de340ec8c457db6b7e4ddb7 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7 2024-12-10T14:57:31,595 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5, entries=47, sequenceid=6, filesize=8.1 K 2024-12-10T14:57:31,595 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7, entries=3, sequenceid=6, filesize=5.2 K 2024-12-10T14:57:31,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for dfddc49fb9e56046b2f7ae9bd1f21878 in 38ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:31,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 2e4a0ab714e6ae8babc774cb4dc9d26d in 38ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for dfddc49fb9e56046b2f7ae9bd1f21878: 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. for snaptb0-testEmptyExportFileSystemState completed. 
2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 2e4a0ab714e6ae8babc774cb4dc9d26d: 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:31,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:31,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5] hfiles 2024-12-10T14:57:31,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7] hfiles 2024-12-10T14:57:31,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742197_1373 (size=115) 2024-12-10T14:57:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39383 is added to blk_1073742197_1373 (size=115) 2024-12-10T14:57:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742197_1373 (size=115) 2024-12-10T14:57:31,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:31,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-10T14:57:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-10T14:57:31,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,607 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:31,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d in 203 msec 2024-12-10T14:57:31,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742196_1372 (size=115) 2024-12-10T14:57:31,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742196_1372 (size=115) 2024-12-10T14:57:31,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742196_1372 (size=115) 2024-12-10T14:57:31,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 
2024-12-10T14:57:31,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-10T14:57:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-10T14:57:31,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,615 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:31,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-10T14:57:31,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878 in 210 msec 2024-12-10T14:57:31,617 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:31,617 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:31,618 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:31,618 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,618 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742198_1374 (size=645) 2024-12-10T14:57:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742198_1374 (size=645) 2024-12-10T14:57:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742198_1374 (size=645) 2024-12-10T14:57:31,628 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-12-10T14:57:31,631 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:31,632 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,633 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:31,633 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-10T14:57:31,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 240 msec 2024-12-10T14:57:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-10T14:57:31,696 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-10T14:57:31,696 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696 2024-12-10T14:57:31,696 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:31,725 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:31,725 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,726 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:57:31,730 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742199_1375 (size=185) 2024-12-10T14:57:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742199_1375 (size=185) 2024-12-10T14:57:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742199_1375 (size=185) 2024-12-10T14:57:31,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742200_1376 (size=567) 2024-12-10T14:57:31,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742200_1376 (size=567) 2024-12-10T14:57:31,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742200_1376 (size=567) 2024-12-10T14:57:31,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:31,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:31,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:31,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-10T14:57:32,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:32,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-10T14:57:32,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-4564193242632970221.jar 2024-12-10T14:57:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-8017848755699745556.jar 2024-12-10T14:57:32,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:32,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:57:32,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:57:32,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:57:32,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:57:32,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:57:32,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:57:32,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:57:32,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:57:32,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:57:32,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:57:32,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:57:32,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:57:32,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:32,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:32,839 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:32,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:32,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:32,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:32,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:32,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742201_1377 (size=127628) 2024-12-10T14:57:32,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742201_1377 (size=127628) 2024-12-10T14:57:32,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742201_1377 (size=127628) 2024-12-10T14:57:32,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742202_1378 (size=2172101) 2024-12-10T14:57:32,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742202_1378 (size=2172101) 2024-12-10T14:57:32,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742202_1378 (size=2172101) 2024-12-10T14:57:32,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742203_1379 (size=213228) 2024-12-10T14:57:32,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742203_1379 (size=213228) 2024-12-10T14:57:32,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742203_1379 (size=213228) 2024-12-10T14:57:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742204_1380 (size=1877034) 2024-12-10T14:57:32,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742204_1380 (size=1877034) 2024-12-10T14:57:32,929 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742204_1380 (size=1877034) 2024-12-10T14:57:32,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742205_1381 (size=533455) 2024-12-10T14:57:32,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742205_1381 (size=533455) 2024-12-10T14:57:32,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742205_1381 (size=533455) 2024-12-10T14:57:32,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742206_1382 (size=7280644) 2024-12-10T14:57:32,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742206_1382 (size=7280644) 2024-12-10T14:57:32,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742206_1382 (size=7280644) 2024-12-10T14:57:32,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742207_1383 (size=4188619) 2024-12-10T14:57:32,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742207_1383 (size=4188619) 2024-12-10T14:57:32,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742207_1383 (size=4188619) 2024-12-10T14:57:32,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742208_1384 (size=20406) 2024-12-10T14:57:32,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742208_1384 (size=20406) 2024-12-10T14:57:32,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742208_1384 (size=20406) 2024-12-10T14:57:32,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742209_1385 (size=75495) 2024-12-10T14:57:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742209_1385 (size=75495) 2024-12-10T14:57:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742209_1385 (size=75495) 2024-12-10T14:57:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742210_1386 (size=45609) 2024-12-10T14:57:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742210_1386 (size=45609) 2024-12-10T14:57:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742210_1386 (size=45609) 2024-12-10T14:57:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742211_1387 (size=110084) 2024-12-10T14:57:33,008 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742211_1387 (size=110084) 2024-12-10T14:57:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742211_1387 (size=110084) 2024-12-10T14:57:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742212_1388 (size=6350861) 2024-12-10T14:57:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742212_1388 (size=6350861) 2024-12-10T14:57:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742212_1388 (size=6350861) 2024-12-10T14:57:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742213_1389 (size=1323991) 2024-12-10T14:57:33,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742213_1389 (size=1323991) 2024-12-10T14:57:33,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742213_1389 (size=1323991) 2024-12-10T14:57:33,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742214_1390 (size=23076) 2024-12-10T14:57:33,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742214_1390 (size=23076) 2024-12-10T14:57:33,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742214_1390 (size=23076) 2024-12-10T14:57:33,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742215_1391 (size=126803) 2024-12-10T14:57:33,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742215_1391 (size=126803) 2024-12-10T14:57:33,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742215_1391 (size=126803) 2024-12-10T14:57:33,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742216_1392 (size=322274) 2024-12-10T14:57:33,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742216_1392 (size=322274) 2024-12-10T14:57:33,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742216_1392 (size=322274) 2024-12-10T14:57:33,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742217_1393 (size=1832290) 2024-12-10T14:57:33,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742217_1393 (size=1832290) 2024-12-10T14:57:33,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742217_1393 (size=1832290) 2024-12-10T14:57:33,078 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742218_1394 (size=30081) 2024-12-10T14:57:33,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742218_1394 (size=30081) 2024-12-10T14:57:33,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742218_1394 (size=30081) 2024-12-10T14:57:33,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742219_1395 (size=53616) 2024-12-10T14:57:33,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742219_1395 (size=53616) 2024-12-10T14:57:33,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742219_1395 (size=53616) 2024-12-10T14:57:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742220_1396 (size=29229) 2024-12-10T14:57:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742220_1396 (size=29229) 2024-12-10T14:57:33,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742220_1396 (size=29229) 2024-12-10T14:57:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742221_1397 (size=169089) 2024-12-10T14:57:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742221_1397 (size=169089) 2024-12-10T14:57:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742221_1397 (size=169089) 2024-12-10T14:57:33,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742222_1398 (size=451756) 2024-12-10T14:57:33,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742222_1398 (size=451756) 2024-12-10T14:57:33,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742222_1398 (size=451756) 2024-12-10T14:57:33,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742223_1399 (size=5175431) 2024-12-10T14:57:33,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742223_1399 (size=5175431) 2024-12-10T14:57:33,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742223_1399 (size=5175431) 2024-12-10T14:57:33,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742224_1400 (size=136454) 2024-12-10T14:57:33,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742224_1400 (size=136454) 2024-12-10T14:57:33,130 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742224_1400 (size=136454) 2024-12-10T14:57:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742225_1401 (size=907858) 2024-12-10T14:57:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742225_1401 (size=907858) 2024-12-10T14:57:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742225_1401 (size=907858) 2024-12-10T14:57:33,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742226_1402 (size=3317408) 2024-12-10T14:57:33,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742226_1402 (size=3317408) 2024-12-10T14:57:33,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742226_1402 (size=3317408) 2024-12-10T14:57:33,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742227_1403 (size=503880) 2024-12-10T14:57:33,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742227_1403 (size=503880) 2024-12-10T14:57:33,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742227_1403 (size=503880) 2024-12-10T14:57:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742228_1404 (size=4695811) 2024-12-10T14:57:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742228_1404 (size=4695811) 2024-12-10T14:57:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742228_1404 (size=4695811) 2024-12-10T14:57:33,180 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-10T14:57:33,182 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-10T14:57:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742229_1405 (size=7) 2024-12-10T14:57:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742229_1405 (size=7) 2024-12-10T14:57:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742229_1405 (size=7) 2024-12-10T14:57:33,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742230_1406 (size=10) 2024-12-10T14:57:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742230_1406 (size=10) 2024-12-10T14:57:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742230_1406 (size=10) 2024-12-10T14:57:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742231_1407 (size=304786) 2024-12-10T14:57:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742231_1407 (size=304786) 2024-12-10T14:57:33,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742231_1407 (size=304786) 2024-12-10T14:57:33,222 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:57:33,222 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:57:33,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0007_000001 (auth:SIMPLE) from 127.0.0.1:34282 2024-12-10T14:57:35,625 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:38,450 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0007_000001 (auth:SIMPLE) from 127.0.0.1:50296 2024-12-10T14:57:38,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742232_1408 (size=350436) 2024-12-10T14:57:38,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742232_1408 (size=350436) 2024-12-10T14:57:38,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742232_1408 (size=350436) 2024-12-10T14:57:39,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742233_1409 (size=8568) 2024-12-10T14:57:39,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742233_1409 (size=8568) 2024-12-10T14:57:39,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742233_1409 (size=8568) 2024-12-10T14:57:39,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742234_1410 (size=460) 2024-12-10T14:57:39,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742234_1410 (size=460) 2024-12-10T14:57:39,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742234_1410 (size=460) 2024-12-10T14:57:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742235_1411 (size=8568) 2024-12-10T14:57:39,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742235_1411 (size=8568) 2024-12-10T14:57:39,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742235_1411 (size=8568) 2024-12-10T14:57:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742236_1412 (size=350436) 2024-12-10T14:57:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742236_1412 (size=350436) 2024-12-10T14:57:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742236_1412 (size=350436) 2024-12-10T14:57:41,327 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:57:41,328 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-10T14:57:41,344 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:41,344 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:57:41,345 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:57:41,345 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:41,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-10T14:57:41,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-10T14:57:41,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:41,347 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-10T14:57:41,347 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842651696/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-10T14:57:41,353 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,353 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T14:57:41,357 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842661357"}]},"ts":"1733842661357"} 2024-12-10T14:57:41,358 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-10T14:57:41,361 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-10T14:57:41,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-10T14:57:41,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, UNASSIGN}] 2024-12-10T14:57:41,364 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, UNASSIGN 2024-12-10T14:57:41,364 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, UNASSIGN 2024-12-10T14:57:41,365 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=dfddc49fb9e56046b2f7ae9bd1f21878, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:57:41,365 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=2e4a0ab714e6ae8babc774cb4dc9d26d, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:57:41,367 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:41,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=175, state=RUNNABLE; CloseRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:57:41,368 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:57:41,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE; CloseRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:57:41,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T14:57:41,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:41,519 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:41,519 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:41,519 DEBUG 
[RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 2e4a0ab714e6ae8babc774cb4dc9d26d, disabling compactions & flushes 2024-12-10T14:57:41,519 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:41,519 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:41,519 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. after waiting 0 ms 2024-12-10T14:57:41,519 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:41,520 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:57:41,521 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:41,521 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:57:41,521 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing dfddc49fb9e56046b2f7ae9bd1f21878, disabling compactions & flushes 2024-12-10T14:57:41,521 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:41,521 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 2024-12-10T14:57:41,521 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. after waiting 0 ms 2024-12-10T14:57:41,521 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 
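[editor's note] The "Export Completed: emptySnaptb0-testEmptyExportFileSystemState" entry at 14:57:41,344 above is the end of an ExportSnapshot run. A minimal sketch of driving that tool from Java follows; the target URI is a placeholder and not a path from this run, and flag spelling can vary slightly between HBase versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent of: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to <target-fs-uri>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/hbase-export"  // placeholder target, not taken from this log
    });
    System.exit(rc);
  }
}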
2024-12-10T14:57:41,525 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:41,526 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:41,526 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d. 2024-12-10T14:57:41,526 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 2e4a0ab714e6ae8babc774cb4dc9d26d: 2024-12-10T14:57:41,529 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:41,530 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:57:41,531 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:57:41,531 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878. 
2024-12-10T14:57:41,531 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for dfddc49fb9e56046b2f7ae9bd1f21878: 2024-12-10T14:57:41,531 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=2e4a0ab714e6ae8babc774cb4dc9d26d, regionState=CLOSED 2024-12-10T14:57:41,532 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:41,533 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=dfddc49fb9e56046b2f7ae9bd1f21878, regionState=CLOSED 2024-12-10T14:57:41,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=175 2024-12-10T14:57:41,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=176 2024-12-10T14:57:41,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=176, state=SUCCESS; CloseRegionProcedure dfddc49fb9e56046b2f7ae9bd1f21878, server=db1d50717577,41775,1733842502887 in 167 msec 2024-12-10T14:57:41,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=175, state=SUCCESS; CloseRegionProcedure 2e4a0ab714e6ae8babc774cb4dc9d26d, server=db1d50717577,45255,1733842503015 in 166 msec 2024-12-10T14:57:41,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2e4a0ab714e6ae8babc774cb4dc9d26d, UNASSIGN in 174 msec 2024-12-10T14:57:41,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-10T14:57:41,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dfddc49fb9e56046b2f7ae9bd1f21878, UNASSIGN in 174 msec 2024-12-10T14:57:41,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-10T14:57:41,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 180 msec 2024-12-10T14:57:41,549 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842661548"}]},"ts":"1733842661548"} 2024-12-10T14:57:41,550 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-10T14:57:41,552 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-10T14:57:41,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 199 msec 2024-12-10T14:57:41,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T14:57:41,659 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-10T14:57:41,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,662 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,663 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,666 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:41,666 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:41,669 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/recovered.edits] 2024-12-10T14:57:41,669 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/recovered.edits] 2024-12-10T14:57:41,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-10T14:57:41,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-10T14:57:41,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-10T14:57:41,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-10T14:57:41,676 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/cf/4f244166532a4f98997a29b82bac9ab5 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, 
quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:41,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:41,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-10T14:57:41,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:41,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:41,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:41,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:41,678 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/cf/d6835cf58de340ec8c457db6b7e4ddb7 2024-12-10T14:57:41,682 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878/recovered.edits/9.seqid 2024-12-10T14:57:41,682 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/dfddc49fb9e56046b2f7ae9bd1f21878 2024-12-10T14:57:41,683 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d/recovered.edits/9.seqid 2024-12-10T14:57:41,683 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testEmptyExportFileSystemState/2e4a0ab714e6ae8babc774cb4dc9d26d 2024-12-10T14:57:41,684 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-10T14:57:41,686 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,689 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-10T14:57:41,695 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-10T14:57:41,696 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,696 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-10T14:57:41,696 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842661696"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:41,696 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842661696"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:41,701 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:57:41,701 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2e4a0ab714e6ae8babc774cb4dc9d26d, NAME => 'testtb-testEmptyExportFileSystemState,,1733842650424.2e4a0ab714e6ae8babc774cb4dc9d26d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => dfddc49fb9e56046b2f7ae9bd1f21878, NAME => 'testtb-testEmptyExportFileSystemState,1,1733842650424.dfddc49fb9e56046b2f7ae9bd1f21878.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:57:41,701 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
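[editor's note] The disable, delete, and snapshot cleanup recorded in this stretch of the log (pid=173 DisableTableProcedure, pid=179 DeleteTableProcedure, then the snapshot deletes that follow) correspond to ordinary client-side Admin calls. A minimal sketch, assuming a running cluster reachable through the default configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);  // server side: DisableTableProcedure closes and unassigns the regions
      }
      admin.deleteTable(tn);     // server side: DeleteTableProcedure archives region dirs and cleans hbase:meta
      // Drop the snapshots taken earlier in the test, as logged right after the table delete.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}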
2024-12-10T14:57:41,701 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842661701"}]},"ts":"9223372036854775807"} 2024-12-10T14:57:41,706 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-10T14:57:41,708 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-10T14:57:41,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 48 msec 2024-12-10T14:57:41,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-10T14:57:41,780 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-10T14:57:41,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-10T14:57:41,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:41,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-10T14:57:41,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-10T14:57:41,828 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=816 (was 801) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_468467357_1 at /127.0.0.1:50300 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:46029 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:56372 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:43482 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:34722 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5565 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:34295 from appattempt_1733842510486_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_468467357_1 at /127.0.0.1:56882 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-39 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 6718) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=823 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=447 (was 463), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=664 (was 1422) 2024-12-10T14:57:41,828 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-10T14:57:41,857 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=816, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=447, ProcessCount=17, AvailableMemoryMB=658 2024-12-10T14:57:41,857 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-10T14:57:41,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:57:41,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:57:41,861 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:57:41,862 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:41,862 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): 
Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-10T14:57:41,862 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:57:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-10T14:57:41,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742237_1413 (size=404) 2024-12-10T14:57:41,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742237_1413 (size=404) 2024-12-10T14:57:41,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742237_1413 (size=404) 2024-12-10T14:57:41,885 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cf14e58a3b5af251a9b5d988345a578d, NAME => 'testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:41,886 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 62c582e8d3f6aff23dc007e67ab43f95, NAME => 'testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:57:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742238_1414 (size=65) 2024-12-10T14:57:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742238_1414 (size=65) 2024-12-10T14:57:41,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742238_1414 (size=65) 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing cf14e58a3b5af251a9b5d988345a578d, disabling compactions & flushes 2024-12-10T14:57:41,914 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. after waiting 0 ms 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:41,914 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:41,914 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for cf14e58a3b5af251a9b5d988345a578d: 2024-12-10T14:57:41,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742239_1415 (size=65) 2024-12-10T14:57:41,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742239_1415 (size=65) 2024-12-10T14:57:41,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742239_1415 (size=65) 2024-12-10T14:57:41,927 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:41,928 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 62c582e8d3f6aff23dc007e67ab43f95, disabling compactions & flushes 2024-12-10T14:57:41,928 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:41,928 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:41,928 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
after waiting 0 ms 2024-12-10T14:57:41,928 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:41,928 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:41,928 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 62c582e8d3f6aff23dc007e67ab43f95: 2024-12-10T14:57:41,929 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:57:41,930 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842661929"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842661929"}]},"ts":"1733842661929"} 2024-12-10T14:57:41,930 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733842661929"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842661929"}]},"ts":"1733842661929"} 2024-12-10T14:57:41,933 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-10T14:57:41,935 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:57:41,935 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842661935"}]},"ts":"1733842661935"} 2024-12-10T14:57:41,936 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-10T14:57:41,941 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:57:41,942 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:57:41,942 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:57:41,943 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:57:41,943 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:57:41,943 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:57:41,943 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:57:41,943 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:57:41,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, 
ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, ASSIGN}] 2024-12-10T14:57:41,944 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, ASSIGN 2024-12-10T14:57:41,944 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, ASSIGN 2024-12-10T14:57:41,945 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, ASSIGN; state=OFFLINE, location=db1d50717577,37707,1733842502956; forceNewPlan=false, retain=false 2024-12-10T14:57:41,946 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:57:41,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-10T14:57:42,096 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:57:42,096 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=62c582e8d3f6aff23dc007e67ab43f95, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:57:42,096 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=cf14e58a3b5af251a9b5d988345a578d, regionState=OPENING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:42,097 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE; OpenRegionProcedure cf14e58a3b5af251a9b5d988345a578d, server=db1d50717577,37707,1733842502956}] 2024-12-10T14:57:42,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE; OpenRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:57:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-10T14:57:42,250 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:42,250 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:42,253 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:57:42,253 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:42,253 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => cf14e58a3b5af251a9b5d988345a578d, NAME => 'testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:57:42,253 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 62c582e8d3f6aff23dc007e67ab43f95, NAME => 'testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. service=AccessControlService 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. service=AccessControlService 2024-12-10T14:57:42,254 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:57:42,254 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,254 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,256 INFO [StoreOpener-62c582e8d3f6aff23dc007e67ab43f95-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,256 INFO [StoreOpener-cf14e58a3b5af251a9b5d988345a578d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,258 INFO [StoreOpener-62c582e8d3f6aff23dc007e67ab43f95-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
62c582e8d3f6aff23dc007e67ab43f95 columnFamilyName cf 2024-12-10T14:57:42,258 DEBUG [StoreOpener-62c582e8d3f6aff23dc007e67ab43f95-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:42,258 INFO [StoreOpener-62c582e8d3f6aff23dc007e67ab43f95-1 {}] regionserver.HStore(327): Store=62c582e8d3f6aff23dc007e67ab43f95/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:42,259 INFO [StoreOpener-cf14e58a3b5af251a9b5d988345a578d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cf14e58a3b5af251a9b5d988345a578d columnFamilyName cf 2024-12-10T14:57:42,259 DEBUG [StoreOpener-cf14e58a3b5af251a9b5d988345a578d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:57:42,259 INFO [StoreOpener-cf14e58a3b5af251a9b5d988345a578d-1 {}] regionserver.HStore(327): Store=cf14e58a3b5af251a9b5d988345a578d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:57:42,259 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,260 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,260 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,260 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,262 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,262 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,265 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:42,266 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 62c582e8d3f6aff23dc007e67ab43f95; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67625568, jitterRate=0.007699489593505859}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:42,267 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 62c582e8d3f6aff23dc007e67ab43f95: 2024-12-10T14:57:42,268 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:57:42,268 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened cf14e58a3b5af251a9b5d988345a578d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74428130, jitterRate=0.10906556248664856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:57:42,268 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95., pid=184, masterSystemTime=1733842662250 2024-12-10T14:57:42,268 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for cf14e58a3b5af251a9b5d988345a578d: 2024-12-10T14:57:42,269 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d., pid=183, masterSystemTime=1733842662250 2024-12-10T14:57:42,270 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:42,270 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:57:42,270 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=62c582e8d3f6aff23dc007e67ab43f95, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:57:42,270 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:42,270 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:42,271 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=cf14e58a3b5af251a9b5d988345a578d, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:57:42,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=182 2024-12-10T14:57:42,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=182, state=SUCCESS; OpenRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95, server=db1d50717577,45255,1733842503015 in 174 msec 2024-12-10T14:57:42,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=181 2024-12-10T14:57:42,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=181, state=SUCCESS; OpenRegionProcedure cf14e58a3b5af251a9b5d988345a578d, server=db1d50717577,37707,1733842502956 in 176 msec 2024-12-10T14:57:42,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, ASSIGN in 331 msec 2024-12-10T14:57:42,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-10T14:57:42,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, ASSIGN in 332 msec 2024-12-10T14:57:42,278 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:57:42,278 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842662278"}]},"ts":"1733842662278"} 2024-12-10T14:57:42,280 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-10T14:57:42,282 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:57:42,282 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-10T14:57:42,284 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], 
kv [jenkins: RWXCA] 2024-12-10T14:57:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:42,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:57:42,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-10T14:57:42,291 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 429 msec 2024-12-10T14:57:42,466 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-10T14:57:42,467 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-10T14:57:42,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-10T14:57:42,467 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:42,471 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-10T14:57:42,471 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:42,471 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-10T14:57:42,474 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-10T14:57:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842662474 (current time:1733842662474). 2024-12-10T14:57:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-10T14:57:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:42,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38b64f8a to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e0f49d1 2024-12-10T14:57:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72fa13ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,481 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38b64f8a to 127.0.0.1:57176 2024-12-10T14:57:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:42,482 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1eabf28f to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a0e614a 2024-12-10T14:57:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58c869a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:42,490 DEBUG [hconnection-0x15c238a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,491 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,494 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44460, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1eabf28f to 127.0.0.1:57176 2024-12-10T14:57:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:42,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-10T14:57:42,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-10T14:57:42,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-10T14:57:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-10T14:57:42,497 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-10T14:57:42,498 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:42,500 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:42,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742240_1416 (size=161) 2024-12-10T14:57:42,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742240_1416 (size=161) 2024-12-10T14:57:42,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742240_1416 (size=161) 2024-12-10T14:57:42,510 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:42,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95}] 2024-12-10T14:57:42,517 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,517 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-10T14:57:42,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-10T14:57:42,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-10T14:57:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-10T14:57:42,669 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:42,669 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:42,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-10T14:57:42,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for cf14e58a3b5af251a9b5d988345a578d: 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 62c582e8d3f6aff23dc007e67ab43f95: 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. for emptySnaptb0-testExportWithChecksum completed. 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. for emptySnaptb0-testExportWithChecksum completed. 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-10T14:57:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-10T14:57:42,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:42,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:42,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:42,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:57:42,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742241_1417 (size=68) 2024-12-10T14:57:42,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742241_1417 (size=68) 2024-12-10T14:57:42,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742241_1417 (size=68) 2024-12-10T14:57:42,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742242_1418 (size=68) 2024-12-10T14:57:42,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:57:42,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-10T14:57:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-10T14:57:42,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 
2024-12-10T14:57:42,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-10T14:57:42,686 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-10T14:57:42,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,687 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742242_1418 (size=68) 2024-12-10T14:57:42,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742242_1418 (size=68) 2024-12-10T14:57:42,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 in 177 msec 2024-12-10T14:57:42,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-10T14:57:42,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d in 177 msec 2024-12-10T14:57:42,689 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:42,691 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:42,692 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:42,692 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-10T14:57:42,692 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-10T14:57:42,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742243_1419 (size=543) 2024-12-10T14:57:42,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742243_1419 (size=543) 2024-12-10T14:57:42,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742243_1419 (size=543) 2024-12-10T14:57:42,703 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:42,706 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:42,707 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-10T14:57:42,708 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:42,708 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-10T14:57:42,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 212 msec 2024-12-10T14:57:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-10T14:57:42,801 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-10T14:57:42,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:57:42,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37707 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-10T14:57:42,822 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-10T14:57:42,822 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:42,822 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:57:42,837 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-10T14:57:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842662837 (current time:1733842662837). 2024-12-10T14:57:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:57:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-10T14:57:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:57:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x462f67bd to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@422c78cd 2024-12-10T14:57:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72587bdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,847 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x462f67bd to 127.0.0.1:57176 2024-12-10T14:57:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f115a85 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3edaa3ba 2024-12-10T14:57:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@feda13d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:57:42,854 DEBUG [hconnection-0x5b18d273-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,855 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35868, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:57:42,858 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:57:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f115a85 to 127.0.0.1:57176 2024-12-10T14:57:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:57:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-10T14:57:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:57:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-10T14:57:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-10T14:57:42,862 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:57:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-10T14:57:42,862 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:57:42,867 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:57:42,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742244_1420 (size=156) 2024-12-10T14:57:42,878 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742244_1420 (size=156) 2024-12-10T14:57:42,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742244_1420 (size=156) 2024-12-10T14:57:42,880 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:57:42,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95}] 2024-12-10T14:57:42,880 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:42,880 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:42,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-10T14:57:43,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:57:43,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:57:43,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37707 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-10T14:57:43,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:43,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-10T14:57:43,032 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing cf14e58a3b5af251a9b5d988345a578d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-10T14:57:43,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:57:43,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 62c582e8d3f6aff23dc007e67ab43f95 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-10T14:57:43,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/.tmp/cf/ae000709e7d04715aba3181dabdbece2 is 71, key is 1629c2dfd7b50c490c66f00f6d647b7b/cf:q/1733842662809/Put/seqid=0 2024-12-10T14:57:43,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/.tmp/cf/b56b59cde73e4285bdc11cc9ea77d9ea is 71, key is 006837ac59ca5bb740478935088825d3/cf:q/1733842662819/Put/seqid=0 2024-12-10T14:57:43,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742245_1421 (size=8326) 2024-12-10T14:57:43,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742245_1421 (size=8326) 2024-12-10T14:57:43,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742245_1421 (size=8326) 2024-12-10T14:57:43,077 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/.tmp/cf/ae000709e7d04715aba3181dabdbece2 2024-12-10T14:57:43,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/.tmp/cf/ae000709e7d04715aba3181dabdbece2 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 2024-12-10T14:57:43,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2, entries=47, sequenceid=6, filesize=8.1 K 2024-12-10T14:57:43,091 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 62c582e8d3f6aff23dc007e67ab43f95 in 59ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:43,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 62c582e8d3f6aff23dc007e67ab43f95: 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. for snaptb0-testExportWithChecksum completed. 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2] hfiles 2024-12-10T14:57:43,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 for snapshot=snaptb0-testExportWithChecksum 2024-12-10T14:57:43,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742246_1422 (size=5286) 2024-12-10T14:57:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742246_1422 (size=5286) 2024-12-10T14:57:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742246_1422 (size=5286) 2024-12-10T14:57:43,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/.tmp/cf/b56b59cde73e4285bdc11cc9ea77d9ea 2024-12-10T14:57:43,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/.tmp/cf/b56b59cde73e4285bdc11cc9ea77d9ea as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea 
2024-12-10T14:57:43,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea, entries=3, sequenceid=6, filesize=5.2 K 2024-12-10T14:57:43,105 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for cf14e58a3b5af251a9b5d988345a578d in 73ms, sequenceid=6, compaction requested=false 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for cf14e58a3b5af251a9b5d988345a578d: 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. for snaptb0-testExportWithChecksum completed. 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea] hfiles 2024-12-10T14:57:43,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea for snapshot=snaptb0-testExportWithChecksum 2024-12-10T14:57:43,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742247_1423 (size=107) 2024-12-10T14:57:43,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742247_1423 (size=107) 2024-12-10T14:57:43,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742247_1423 (size=107) 2024-12-10T14:57:43,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:57:43,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-10T14:57:43,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-10T14:57:43,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:43,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:57:43,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95 in 236 msec 2024-12-10T14:57:43,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742248_1424 (size=107) 2024-12-10T14:57:43,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742248_1424 (size=107) 2024-12-10T14:57:43,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742248_1424 (size=107) 2024-12-10T14:57:43,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:57:43,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-10T14:57:43,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-10T14:57:43,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:43,130 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:57:43,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=189, resume processing ppid=188 2024-12-10T14:57:43,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure cf14e58a3b5af251a9b5d988345a578d in 251 msec 2024-12-10T14:57:43,132 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:57:43,133 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:57:43,134 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:57:43,134 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-10T14:57:43,134 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-10T14:57:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742249_1425 (size=621) 2024-12-10T14:57:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742249_1425 (size=621) 2024-12-10T14:57:43,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742249_1425 (size=621) 2024-12-10T14:57:43,152 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:57:43,157 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:57:43,157 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-10T14:57:43,158 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:57:43,159 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-10T14:57:43,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 299 msec 2024-12-10T14:57:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-10T14:57:43,164 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): 
Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed
2024-12-10T14:57:43,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164
2024-12-10T14:57:43,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef
2024-12-10T14:57:43,193 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef
2024-12-10T14:57:43,193 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@1a7f087e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-10T14:57:43,195 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
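The TestExportSnapshot entries above set up an export of that snapshot from hdfs://localhost:36449 to a local file:/ destination. ExportSnapshot is driven as a Hadoop Tool (the stack traces later in this log go through AbstractHBaseTool.run and ToolRunner.run), so a programmatic invocation would look roughly like the sketch below; the argument spellings follow the documented -snapshot/-copy-to options and the output path is a placeholder, both assumptions rather than the test's actual wiring.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced hfiles to the target
    // root, mirroring the hdfs:// -> file:/ copy attempted in this run.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // placeholder target
    });
    System.exit(rc);
  }
}

Because the destination is a different filesystem type than the source, the per-file checksum verification later in the job has to reconcile two different checksum schemes.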
2024-12-10T14:57:43,198 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-10T14:57:43,222 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:43,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:43,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:43,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-6040872398864120579.jar 2024-12-10T14:57:44,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,324 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-417704372751365938.jar 2024-12-10T14:57:44,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:57:44,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:57:44,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:57:44,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:57:44,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:57:44,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:57:44,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:57:44,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:57:44,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:57:44,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:57:44,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:57:44,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:57:44,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:57:44,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:44,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:44,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:44,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:44,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:57:44,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:44,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:57:44,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742250_1426 (size=127628) 2024-12-10T14:57:44,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34057 is added to blk_1073742250_1426 (size=127628) 2024-12-10T14:57:44,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742250_1426 (size=127628) 2024-12-10T14:57:44,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742251_1427 (size=2172101) 2024-12-10T14:57:44,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742251_1427 (size=2172101) 2024-12-10T14:57:44,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742251_1427 (size=2172101) 2024-12-10T14:57:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742252_1428 (size=213228) 2024-12-10T14:57:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742252_1428 (size=213228) 2024-12-10T14:57:44,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742252_1428 (size=213228) 2024-12-10T14:57:44,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742253_1429 (size=1877034) 2024-12-10T14:57:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742253_1429 (size=1877034) 2024-12-10T14:57:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742253_1429 (size=1877034) 2024-12-10T14:57:44,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742254_1430 (size=533455) 2024-12-10T14:57:44,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742254_1430 (size=533455) 2024-12-10T14:57:44,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742254_1430 (size=533455) 2024-12-10T14:57:44,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742255_1431 (size=7280644) 2024-12-10T14:57:44,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742255_1431 (size=7280644) 2024-12-10T14:57:44,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742255_1431 (size=7280644) 2024-12-10T14:57:44,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742256_1432 (size=4188619) 2024-12-10T14:57:44,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742256_1432 (size=4188619) 2024-12-10T14:57:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742256_1432 (size=4188619) 2024-12-10T14:57:44,654 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742257_1433 (size=20406) 2024-12-10T14:57:44,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742257_1433 (size=20406) 2024-12-10T14:57:44,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742257_1433 (size=20406) 2024-12-10T14:57:44,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742258_1434 (size=75495) 2024-12-10T14:57:44,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742258_1434 (size=75495) 2024-12-10T14:57:44,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742258_1434 (size=75495) 2024-12-10T14:57:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742259_1435 (size=6350861) 2024-12-10T14:57:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742259_1435 (size=6350861) 2024-12-10T14:57:44,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742259_1435 (size=6350861) 2024-12-10T14:57:44,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742260_1436 (size=45609) 2024-12-10T14:57:44,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742260_1436 (size=45609) 2024-12-10T14:57:44,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742260_1436 (size=45609) 2024-12-10T14:57:44,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742261_1437 (size=110084) 2024-12-10T14:57:44,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742261_1437 (size=110084) 2024-12-10T14:57:44,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742261_1437 (size=110084) 2024-12-10T14:57:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742262_1438 (size=1323991) 2024-12-10T14:57:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742262_1438 (size=1323991) 2024-12-10T14:57:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742262_1438 (size=1323991) 2024-12-10T14:57:44,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742263_1439 (size=23076) 2024-12-10T14:57:44,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742263_1439 (size=23076) 2024-12-10T14:57:44,753 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742263_1439 (size=23076) 2024-12-10T14:57:44,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742264_1440 (size=126803) 2024-12-10T14:57:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742264_1440 (size=126803) 2024-12-10T14:57:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742264_1440 (size=126803) 2024-12-10T14:57:44,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742265_1441 (size=322274) 2024-12-10T14:57:44,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742265_1441 (size=322274) 2024-12-10T14:57:44,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742265_1441 (size=322274) 2024-12-10T14:57:44,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742266_1442 (size=1832290) 2024-12-10T14:57:44,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742266_1442 (size=1832290) 2024-12-10T14:57:44,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742266_1442 (size=1832290) 2024-12-10T14:57:44,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742267_1443 (size=451756) 2024-12-10T14:57:44,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742267_1443 (size=451756) 2024-12-10T14:57:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742267_1443 (size=451756) 2024-12-10T14:57:44,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742268_1444 (size=30081) 2024-12-10T14:57:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742268_1444 (size=30081) 2024-12-10T14:57:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742268_1444 (size=30081) 2024-12-10T14:57:44,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742269_1445 (size=53616) 2024-12-10T14:57:44,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742269_1445 (size=53616) 2024-12-10T14:57:44,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742269_1445 (size=53616) 2024-12-10T14:57:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742270_1446 (size=29229) 2024-12-10T14:57:44,816 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742270_1446 (size=29229) 2024-12-10T14:57:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742270_1446 (size=29229) 2024-12-10T14:57:44,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742271_1447 (size=169089) 2024-12-10T14:57:44,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742271_1447 (size=169089) 2024-12-10T14:57:44,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742271_1447 (size=169089) 2024-12-10T14:57:44,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742272_1448 (size=5175431) 2024-12-10T14:57:44,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742272_1448 (size=5175431) 2024-12-10T14:57:44,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742272_1448 (size=5175431) 2024-12-10T14:57:44,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742273_1449 (size=136454) 2024-12-10T14:57:44,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742273_1449 (size=136454) 2024-12-10T14:57:44,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742273_1449 (size=136454) 2024-12-10T14:57:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742274_1450 (size=907858) 2024-12-10T14:57:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742274_1450 (size=907858) 2024-12-10T14:57:44,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742274_1450 (size=907858) 2024-12-10T14:57:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742275_1451 (size=3317408) 2024-12-10T14:57:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742275_1451 (size=3317408) 2024-12-10T14:57:44,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742275_1451 (size=3317408) 2024-12-10T14:57:44,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742276_1452 (size=503880) 2024-12-10T14:57:44,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742276_1452 (size=503880) 2024-12-10T14:57:44,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742276_1452 (size=503880) 2024-12-10T14:57:44,903 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742277_1453 (size=4695811) 2024-12-10T14:57:44,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742277_1453 (size=4695811) 2024-12-10T14:57:44,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742277_1453 (size=4695811) 2024-12-10T14:57:44,905 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-10T14:57:44,907 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-10T14:57:44,909 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:57:44,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742278_1454 (size=338) 2024-12-10T14:57:44,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742278_1454 (size=338) 2024-12-10T14:57:44,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742278_1454 (size=338) 2024-12-10T14:57:44,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742279_1455 (size=15) 2024-12-10T14:57:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742279_1455 (size=15) 2024-12-10T14:57:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742279_1455 (size=15) 2024-12-10T14:57:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742280_1456 (size=304927) 2024-12-10T14:57:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742280_1456 (size=304927) 2024-12-10T14:57:44,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742280_1456 (size=304927) 2024-12-10T14:57:45,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:57:45,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:57:45,699 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0007_000001 (auth:SIMPLE) from 127.0.0.1:36700 2024-12-10T14:57:45,711 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0007/container_1733842510486_0007_01_000001/launch_container.sh] 2024-12-10T14:57:45,711 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0007/container_1733842510486_0007_01_000001/container_tokens] 2024-12-10T14:57:45,711 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_2/usercache/jenkins/appcache/application_1733842510486_0007/container_1733842510486_0007_01_000001/sysfs] 2024-12-10T14:57:46,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:51528 2024-12-10T14:57:47,049 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:57:51,370 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:37456 2024-12-10T14:57:51,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742281_1457 (size=350601) 2024-12-10T14:57:51,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742281_1457 (size=350601) 2024-12-10T14:57:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742281_1457 (size=350601) 2024-12-10T14:57:53,609 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:50856 2024-12-10T14:57:57,023 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000002/launch_container.sh] 2024-12-10T14:57:57,023 
WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000002/container_tokens]
2024-12-10T14:57:57,023 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000002/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164/archive/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-10T14:57:58,458 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:48912
2024-12-10T14:58:00,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
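The Error above comes from ExportSnapshot's post-copy verification: the source is HDFS and the destination is a local filesystem, and the two report checksums computed in different ways, so the comparison cannot come out equal. Conceptually the check has the shape of the sketch below, which assumes only the standard Hadoop FileSystem API and is not the actual verifyCopyResult code.

import java.io.IOException;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumCompareSketch {
  // Shape of the check only: ask each filesystem for its FileChecksum of the
  // copied file and compare. When the two filesystems compute checksums
  // differently (e.g. HDFS block CRCs vs. a local filesystem), the values
  // cannot match and verification fails.
  static boolean sameChecksum(Configuration conf, Path src, Path dst) throws IOException {
    FileChecksum a = src.getFileSystem(conf).getFileChecksum(src);
    FileChecksum b = dst.getFileSystem(conf).getFileChecksum(dst);
    // Either side may return null if the filesystem exposes no checksum at all.
    return a != null && b != null && Objects.equals(a, b);
  }
}

The message itself names the two ways out: force whole-file composite CRCs with -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or disable verification with -no-checksum-verify at the risk of missing real corruption during the copy.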
2024-12-10T14:58:01,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000003/launch_container.sh] 2024-12-10T14:58:01,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000003/container_tokens] 2024-12-10T14:58:01,978 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164/archive/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-10T14:58:03,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:35400 2024-12-10T14:58:04,334 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 32558485cb37ace42cbc3671b13298ba, had cached 0 bytes from a total of 8394 2024-12-10T14:58:04,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3d3c5d37b59f3a27bd963f36e8591d10, had cached 0 bytes from a total of 5216 2024-12-10T14:58:06,024 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 32558485cb37ace42cbc3671b13298ba changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:58:06,024 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region cf14e58a3b5af251a9b5d988345a578d changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:58:06,024 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3d3c5d37b59f3a27bd963f36e8591d10 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:58:06,024 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 62c582e8d3f6aff23dc007e67ab43f95 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:58:06,487 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000004/launch_container.sh] 2024-12-10T14:58:06,487 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000004/container_tokens] 2024-12-10T14:58:06,487 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/local-export-1733842663164/archive/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-10T14:58:07,486 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:38646 2024-12-10T14:58:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742282_1458 (size=21340) 2024-12-10T14:58:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742282_1458 (size=21340) 2024-12-10T14:58:10,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742282_1458 (size=21340) 2024-12-10T14:58:10,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742283_1459 (size=460) 2024-12-10T14:58:10,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742283_1459 (size=460) 2024-12-10T14:58:10,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to 
blk_1073742283_1459 (size=460)
2024-12-10T14:58:10,432 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000005/launch_container.sh]
2024-12-10T14:58:10,433 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000005/container_tokens]
2024-12-10T14:58:10,433 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000005/sysfs]
2024-12-10T14:58:10,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742284_1460 (size=21340)
2024-12-10T14:58:10,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742284_1460 (size=21340)
2024-12-10T14:58:10,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742284_1460 (size=21340)
2024-12-10T14:58:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742285_1461 (size=350601)
2024-12-10T14:58:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742285_1461 (size=350601)
2024-12-10T14:58:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742285_1461 (size=350601)
2024-12-10T14:58:12,144 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733842510486_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
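With every map attempt hitting the same checksum mismatch, the copy job fails and ExportSnapshot surfaces it as the ExportSnapshotException above. A hedged sketch of how the local-filesystem export could be retried with the remediations the mapper error suggested is below; the flag spellings come from the error text and the documented ExportSnapshot options, while the target path is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithChecksumWorkaroundSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option (a) from the mapper error: compare whole-file composite CRCs so
    // differing block sizes / filesystem types can still be cross-checked
    // (effective only where both sides support composite CRCs).
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",   // placeholder target
        // Option (b): uncomment to skip verification entirely; the error's
        // NOTE warns this can mask corruption introduced during the copy.
        // "-no-checksum-verify",
    });
    System.exit(rc);
  }
}

The test itself takes a different route next: the entries that follow re-run the export against an hdfs:// target on the same cluster, so source and destination report the same kind of checksum.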
2024-12-10T14:58:12,145 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145 2024-12-10T14:58:12,145 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:12,174 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:12,175 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-10T14:58:12,177 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:58:12,181 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-10T14:58:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742286_1462 (size=156) 2024-12-10T14:58:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742286_1462 (size=156) 2024-12-10T14:58:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742286_1462 (size=156) 2024-12-10T14:58:12,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742287_1463 (size=621) 2024-12-10T14:58:12,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742287_1463 (size=621) 2024-12-10T14:58:12,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742287_1463 (size=621) 2024-12-10T14:58:12,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:12,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:12,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:12,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-16235056590989996087.jar 2024-12-10T14:58:13,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-652052425150857614.jar 2024-12-10T14:58:13,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:13,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 
2024-12-10T14:58:13,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:58:13,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:58:13,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:58:13,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:58:13,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:58:13,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:58:13,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:58:13,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:58:13,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:58:13,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:58:13,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:58:13,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:58:13,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:13,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:13,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:13,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:13,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:13,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:13,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:13,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742288_1464 (size=127628) 2024-12-10T14:58:13,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742288_1464 (size=127628) 2024-12-10T14:58:13,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742288_1464 (size=127628) 2024-12-10T14:58:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742289_1465 (size=2172101) 2024-12-10T14:58:13,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742289_1465 (size=2172101) 2024-12-10T14:58:13,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742289_1465 (size=2172101) 2024-12-10T14:58:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is 
added to blk_1073742290_1466 (size=213228) 2024-12-10T14:58:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742290_1466 (size=213228) 2024-12-10T14:58:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742290_1466 (size=213228) 2024-12-10T14:58:13,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742291_1467 (size=1877034) 2024-12-10T14:58:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742291_1467 (size=1877034) 2024-12-10T14:58:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742291_1467 (size=1877034) 2024-12-10T14:58:13,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742292_1468 (size=533455) 2024-12-10T14:58:13,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742292_1468 (size=533455) 2024-12-10T14:58:13,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742292_1468 (size=533455) 2024-12-10T14:58:13,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742293_1469 (size=7280644) 2024-12-10T14:58:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742293_1469 (size=7280644) 2024-12-10T14:58:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742293_1469 (size=7280644) 2024-12-10T14:58:13,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742294_1470 (size=4188619) 2024-12-10T14:58:13,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742294_1470 (size=4188619) 2024-12-10T14:58:13,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742294_1470 (size=4188619) 2024-12-10T14:58:13,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742295_1471 (size=20406) 2024-12-10T14:58:13,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742295_1471 (size=20406) 2024-12-10T14:58:13,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742295_1471 (size=20406) 2024-12-10T14:58:13,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742296_1472 (size=75495) 2024-12-10T14:58:13,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742296_1472 (size=75495) 2024-12-10T14:58:13,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34057 is added to blk_1073742296_1472 (size=75495) 2024-12-10T14:58:13,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742297_1473 (size=451756) 2024-12-10T14:58:13,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742297_1473 (size=451756) 2024-12-10T14:58:13,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742297_1473 (size=451756) 2024-12-10T14:58:13,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742298_1474 (size=45609) 2024-12-10T14:58:13,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742298_1474 (size=45609) 2024-12-10T14:58:13,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742298_1474 (size=45609) 2024-12-10T14:58:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742299_1475 (size=110084) 2024-12-10T14:58:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742299_1475 (size=110084) 2024-12-10T14:58:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742299_1475 (size=110084) 2024-12-10T14:58:13,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742300_1476 (size=1323991) 2024-12-10T14:58:13,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742300_1476 (size=1323991) 2024-12-10T14:58:13,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742300_1476 (size=1323991) 2024-12-10T14:58:13,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742301_1477 (size=23076) 2024-12-10T14:58:13,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742301_1477 (size=23076) 2024-12-10T14:58:13,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742301_1477 (size=23076) 2024-12-10T14:58:13,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742302_1478 (size=126803) 2024-12-10T14:58:13,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742302_1478 (size=126803) 2024-12-10T14:58:13,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742302_1478 (size=126803) 2024-12-10T14:58:13,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742303_1479 (size=322274) 2024-12-10T14:58:13,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39383 is added to blk_1073742303_1479 (size=322274) 2024-12-10T14:58:13,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742303_1479 (size=322274) 2024-12-10T14:58:13,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742304_1480 (size=6350861) 2024-12-10T14:58:13,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742304_1480 (size=6350861) 2024-12-10T14:58:13,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742304_1480 (size=6350861) 2024-12-10T14:58:13,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742305_1481 (size=1832290) 2024-12-10T14:58:13,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742305_1481 (size=1832290) 2024-12-10T14:58:13,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742305_1481 (size=1832290) 2024-12-10T14:58:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742306_1482 (size=30081) 2024-12-10T14:58:13,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742306_1482 (size=30081) 2024-12-10T14:58:13,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742306_1482 (size=30081) 2024-12-10T14:58:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742307_1483 (size=53616) 2024-12-10T14:58:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742307_1483 (size=53616) 2024-12-10T14:58:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742307_1483 (size=53616) 2024-12-10T14:58:14,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742308_1484 (size=29229) 2024-12-10T14:58:14,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742308_1484 (size=29229) 2024-12-10T14:58:14,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742308_1484 (size=29229) 2024-12-10T14:58:14,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742309_1485 (size=169089) 2024-12-10T14:58:14,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742309_1485 (size=169089) 2024-12-10T14:58:14,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742309_1485 (size=169089) 2024-12-10T14:58:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742310_1486 (size=5175431) 2024-12-10T14:58:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742310_1486 (size=5175431) 2024-12-10T14:58:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742310_1486 (size=5175431) 2024-12-10T14:58:14,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742311_1487 (size=136454) 2024-12-10T14:58:14,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742311_1487 (size=136454) 2024-12-10T14:58:14,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742311_1487 (size=136454) 2024-12-10T14:58:14,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742312_1488 (size=907858) 2024-12-10T14:58:14,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742312_1488 (size=907858) 2024-12-10T14:58:14,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742312_1488 (size=907858) 2024-12-10T14:58:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742313_1489 (size=3317408) 2024-12-10T14:58:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742313_1489 (size=3317408) 2024-12-10T14:58:14,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742313_1489 (size=3317408) 2024-12-10T14:58:14,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742314_1490 (size=503880) 2024-12-10T14:58:14,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742314_1490 (size=503880) 2024-12-10T14:58:14,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742314_1490 (size=503880) 2024-12-10T14:58:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742315_1491 (size=4695811) 2024-12-10T14:58:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742315_1491 (size=4695811) 2024-12-10T14:58:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742315_1491 (size=4695811) 2024-12-10T14:58:14,509 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-10T14:58:14,511 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-10T14:58:14,513 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:58:14,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742316_1492 (size=338) 2024-12-10T14:58:14,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742316_1492 (size=338) 2024-12-10T14:58:14,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742316_1492 (size=338) 2024-12-10T14:58:14,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742317_1493 (size=15) 2024-12-10T14:58:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742317_1493 (size=15) 2024-12-10T14:58:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742317_1493 (size=15) 2024-12-10T14:58:14,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742318_1494 (size=304879) 2024-12-10T14:58:14,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742318_1494 (size=304879) 2024-12-10T14:58:14,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742318_1494 (size=304879) 2024-12-10T14:58:16,551 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:58:16,551 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:58:16,555 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0008_000001 (auth:SIMPLE) from 127.0.0.1:41768 2024-12-10T14:58:16,566 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000001/launch_container.sh] 2024-12-10T14:58:16,566 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000001/container_tokens] 2024-12-10T14:58:16,566 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_3/usercache/jenkins/appcache/application_1733842510486_0008/container_1733842510486_0008_01_000001/sysfs] 2024-12-10T14:58:17,478 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0009_000001 (auth:SIMPLE) from 127.0.0.1:58656 2024-12-10T14:58:22,322 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0009_000001 (auth:SIMPLE) from 127.0.0.1:50640 2024-12-10T14:58:22,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742319_1495 (size=350553) 2024-12-10T14:58:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742319_1495 (size=350553) 2024-12-10T14:58:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742319_1495 (size=350553) 2024-12-10T14:58:24,610 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0009_000001 (auth:SIMPLE) from 127.0.0.1:39258 2024-12-10T14:58:27,255 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 62c582e8d3f6aff23dc007e67ab43f95, had cached 0 bytes from a total of 8326 2024-12-10T14:58:27,255 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region cf14e58a3b5af251a9b5d988345a578d, had cached 0 bytes from a total of 5286 2024-12-10T14:58:28,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742320_1496 (size=8326) 2024-12-10T14:58:28,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34057 is added to blk_1073742320_1496 (size=8326) 2024-12-10T14:58:28,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742320_1496 (size=8326) 2024-12-10T14:58:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742321_1497 (size=5286) 2024-12-10T14:58:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742321_1497 (size=5286) 2024-12-10T14:58:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742321_1497 (size=5286) 2024-12-10T14:58:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742322_1498 (size=17413) 2024-12-10T14:58:28,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742322_1498 (size=17413) 2024-12-10T14:58:28,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742322_1498 (size=17413) 2024-12-10T14:58:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742323_1499 (size=462) 2024-12-10T14:58:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742323_1499 (size=462) 2024-12-10T14:58:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742323_1499 (size=462) 2024-12-10T14:58:28,222 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000002/launch_container.sh] 2024-12-10T14:58:28,223 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000002/container_tokens] 2024-12-10T14:58:28,223 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_1/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000002/sysfs] 2024-12-10T14:58:28,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742324_1500 (size=17413) 2024-12-10T14:58:28,245 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742324_1500 (size=17413) 2024-12-10T14:58:28,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742324_1500 (size=17413) 2024-12-10T14:58:28,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742325_1501 (size=350553) 2024-12-10T14:58:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742325_1501 (size=350553) 2024-12-10T14:58:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742325_1501 (size=350553) 2024-12-10T14:58:28,287 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0009_000001 (auth:SIMPLE) from 127.0.0.1:39264 2024-12-10T14:58:29,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:58:29,706 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-10T14:58:29,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-10T14:58:29,711 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:58:29,711 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:58:29,711 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-10T14:58:29,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-10T14:58:29,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-10T14:58:29,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-10T14:58:29,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-10T14:58:29,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842692145/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-10T14:58:29,717 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-10T14:58:29,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-10T14:58:29,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:29,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-10T14:58:29,720 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842709720"}]},"ts":"1733842709720"} 2024-12-10T14:58:29,721 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-10T14:58:29,723 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-10T14:58:29,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-10T14:58:29,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, UNASSIGN}] 2024-12-10T14:58:29,725 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, UNASSIGN 2024-12-10T14:58:29,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, UNASSIGN 2024-12-10T14:58:29,726 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=62c582e8d3f6aff23dc007e67ab43f95, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:58:29,726 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=cf14e58a3b5af251a9b5d988345a578d, regionState=CLOSING, regionLocation=db1d50717577,37707,1733842502956 2024-12-10T14:58:29,727 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:58:29,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=193, state=RUNNABLE; CloseRegionProcedure cf14e58a3b5af251a9b5d988345a578d, server=db1d50717577,37707,1733842502956}] 
2024-12-10T14:58:29,727 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:58:29,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE; CloseRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:58:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-10T14:58:29,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,37707,1733842502956 2024-12-10T14:58:29,879 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:58:29,879 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:58:29,879 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing cf14e58a3b5af251a9b5d988345a578d, disabling compactions & flushes 2024-12-10T14:58:29,879 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:58:29,879 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:58:29,879 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. after waiting 0 ms 2024-12-10T14:58:29,879 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:58:29,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:58:29,879 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:58:29,880 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:58:29,880 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 62c582e8d3f6aff23dc007e67ab43f95, disabling compactions & flushes 2024-12-10T14:58:29,880 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:58:29,880 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:58:29,880 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. after waiting 0 ms 2024-12-10T14:58:29,880 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 2024-12-10T14:58:29,883 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:58:29,883 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:58:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:58:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:58:29,884 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d. 2024-12-10T14:58:29,884 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95. 
2024-12-10T14:58:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for cf14e58a3b5af251a9b5d988345a578d: 2024-12-10T14:58:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 62c582e8d3f6aff23dc007e67ab43f95: 2024-12-10T14:58:29,885 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:58:29,886 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=cf14e58a3b5af251a9b5d988345a578d, regionState=CLOSED 2024-12-10T14:58:29,886 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:58:29,886 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=62c582e8d3f6aff23dc007e67ab43f95, regionState=CLOSED 2024-12-10T14:58:29,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=193 2024-12-10T14:58:29,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=193, state=SUCCESS; CloseRegionProcedure cf14e58a3b5af251a9b5d988345a578d, server=db1d50717577,37707,1733842502956 in 160 msec 2024-12-10T14:58:29,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=194 2024-12-10T14:58:29,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=cf14e58a3b5af251a9b5d988345a578d, UNASSIGN in 164 msec 2024-12-10T14:58:29,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=194, state=SUCCESS; CloseRegionProcedure 62c582e8d3f6aff23dc007e67ab43f95, server=db1d50717577,45255,1733842503015 in 160 msec 2024-12-10T14:58:29,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=192 2024-12-10T14:58:29,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=62c582e8d3f6aff23dc007e67ab43f95, UNASSIGN in 165 msec 2024-12-10T14:58:29,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-10T14:58:29,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 168 msec 2024-12-10T14:58:29,892 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842709892"}]},"ts":"1733842709892"} 2024-12-10T14:58:29,893 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-10T14:58:29,895 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-10T14:58:29,896 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum 
in 179 msec 2024-12-10T14:58:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-10T14:58:30,021 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-10T14:58:30,022 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-10T14:58:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,023 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-10T14:58:30,024 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,025 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-10T14:58:30,027 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:58:30,027 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:58:30,028 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/recovered.edits] 2024-12-10T14:58:30,028 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/recovered.edits] 2024-12-10T14:58:30,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-10T14:58:30,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-10T14:58:30,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-10T14:58:30,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-10T14:58:30,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-10T14:58:30,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,031 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-10T14:58:30,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,033 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea 2024-12-10T14:58:30,033 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/cf/ae000709e7d04715aba3181dabdbece2 2024-12-10T14:58:30,036 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/recovered.edits/9.seqid 2024-12-10T14:58:30,037 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d 2024-12-10T14:58:30,037 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95/recovered.edits/9.seqid 2024-12-10T14:58:30,038 DEBUG [HFileArchiver-18 {}] 
backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportWithChecksum/62c582e8d3f6aff23dc007e67ab43f95 2024-12-10T14:58:30,038 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-10T14:58:30,039 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,041 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-10T14:58:30,043 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-10T14:58:30,044 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,044 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-10T14:58:30,044 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842710044"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:30,044 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842710044"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:30,046 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:58:30,046 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cf14e58a3b5af251a9b5d988345a578d, NAME => 'testtb-testExportWithChecksum,,1733842661859.cf14e58a3b5af251a9b5d988345a578d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 62c582e8d3f6aff23dc007e67ab43f95, NAME => 'testtb-testExportWithChecksum,1,1733842661859.62c582e8d3f6aff23dc007e67ab43f95.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:58:30,046 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 
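The HFileArchiver entries above move each store file and recovered.edits file from the table's data directory to the matching location under archive/ before the region directory itself is deleted. The archive path simply mirrors the data layout under an archive/ root; the following is only an illustrative sketch (hypothetical helper, not HBase's HFileArchiver code, which also handles retries and name collisions), using the paths from the log:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Mirror <root>/data/... to <root>/archive/data/... (illustration only).
      static Path toArchivePath(Path root, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(root.toUri().getPath().length() + 1); // e.g. data/default/<table>/<region>/cf/<hfile>
        return new Path(new Path(root, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef");
        Path storeFile = new Path(root,
            "data/default/testtb-testExportWithChecksum/cf14e58a3b5af251a9b5d988345a578d/cf/b56b59cde73e4285bdc11cc9ea77d9ea");
        // Prints the archive/data/default/... path seen in the "Archived from ... to ..." lines above.
        System.out.println(toArchivePath(root, storeFile));
      }
    }
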
2024-12-10T14:58:30,046 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842710046"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:30,047 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-10T14:58:30,049 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-10T14:58:30,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 27 msec 2024-12-10T14:58:30,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-10T14:58:30,133 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-10T14:58:30,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-10T14:58:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-10T14:58:30,140 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-10T14:58:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-10T14:58:30,176 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=812 (was 816), OpenFileDescriptor=810 (was 823), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=369 (was 447), ProcessCount=17 (was 17), AvailableMemoryMB=561 (was 658) 2024-12-10T14:58:30,176 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-10T14:58:30,193 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=812, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=369, ProcessCount=17, AvailableMemoryMB=561 2024-12-10T14:58:30,193 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-10T14:58:30,195 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:58:30,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
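The teardown recorded above (DISABLE procId=191, DELETE procId=197, then deletion of the two testExportWithChecksum snapshots) matches the usual client-side cleanup sequence between tests. A minimal sketch, assuming the standard HBase 2.x client Admin API; table and snapshot names are taken from the log, the class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);   // DisableTableProcedure (procId=191 above)
          }
          admin.deleteTable(table);      // DeleteTableProcedure (procId=197 above)
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }

Both disableTable and deleteTable block until the corresponding master procedure finishes, which is why the client-side "Operation: DELETE ... procId: 197 completed" line only appears after the procedure reports SUCCESS.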
2024-12-10T14:58:30,196 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:58:30,196 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:58:30,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-10T14:58:30,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-10T14:58:30,197 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:58:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742326_1502 (size=418) 2024-12-10T14:58:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742326_1502 (size=418) 2024-12-10T14:58:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742326_1502 (size=418) 2024-12-10T14:58:30,205 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7d77f1bda4bf89952efbc61133d5c34a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:30,206 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => ab2092c60854e26deb73b20350b7a268, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:30,211 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742327_1503 (size=79) 2024-12-10T14:58:30,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742327_1503 (size=79) 2024-12-10T14:58:30,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742328_1504 (size=79) 2024-12-10T14:58:30,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742328_1504 (size=79) 2024-12-10T14:58:30,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742327_1503 (size=79) 2024-12-10T14:58:30,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:58:30,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing ab2092c60854e26deb73b20350b7a268, disabling compactions & flushes 2024-12-10T14:58:30,212 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. after waiting 0 ms 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,213 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 
2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for ab2092c60854e26deb73b20350b7a268: 2024-12-10T14:58:30,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742328_1504 (size=79) 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 7d77f1bda4bf89952efbc61133d5c34a, disabling compactions & flushes 2024-12-10T14:58:30,213 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. after waiting 0 ms 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,213 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,213 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7d77f1bda4bf89952efbc61133d5c34a: 2024-12-10T14:58:30,214 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:58:30,214 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733842710214"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842710214"}]},"ts":"1733842710214"} 2024-12-10T14:58:30,214 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733842710214"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733842710214"}]},"ts":"1733842710214"} 2024-12-10T14:58:30,216 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
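The CreateTableProcedure above lays out two regions split at '1' (STARTKEY ''..'1' and '1'..'') with a single 'cf' family declared with VERSIONS => '1'. A sketch of the equivalent client-side request, assuming the HBase 2.x builder API (class name hypothetical, table schema taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)            // VERSIONS => '1' in the descriptor above
                  .build())
              .build();
          // One split key produces the two regions logged: ['', '1') and ['1', '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
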
2024-12-10T14:58:30,217 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:58:30,217 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842710217"}]},"ts":"1733842710217"} 2024-12-10T14:58:30,218 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-10T14:58:30,223 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {db1d50717577=0} racks are {/default-rack=0} 2024-12-10T14:58:30,224 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-10T14:58:30,224 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-10T14:58:30,224 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-10T14:58:30,224 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-10T14:58:30,224 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-10T14:58:30,224 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-10T14:58:30,224 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T14:58:30,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, ASSIGN}] 2024-12-10T14:58:30,225 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, ASSIGN 2024-12-10T14:58:30,225 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, ASSIGN 2024-12-10T14:58:30,225 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, ASSIGN; state=OFFLINE, location=db1d50717577,45255,1733842503015; forceNewPlan=false, retain=false 2024-12-10T14:58:30,225 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, ASSIGN; state=OFFLINE, location=db1d50717577,41775,1733842502887; forceNewPlan=false, retain=false 
2024-12-10T14:58:30,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-10T14:58:30,376 INFO [db1d50717577:36533 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-10T14:58:30,376 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=7d77f1bda4bf89952efbc61133d5c34a, regionState=OPENING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:58:30,376 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=ab2092c60854e26deb73b20350b7a268, regionState=OPENING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:58:30,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure ab2092c60854e26deb73b20350b7a268, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:58:30,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:58:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-10T14:58:30,529 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:58:30,529 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:58:30,532 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,532 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => ab2092c60854e26deb73b20350b7a268, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.', STARTKEY => '1', ENDKEY => ''} 2024-12-10T14:58:30,532 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,532 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. service=AccessControlService 2024-12-10T14:58:30,532 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 7d77f1bda4bf89952efbc61133d5c34a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.', STARTKEY => '', ENDKEY => '1'} 2024-12-10T14:58:30,532 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
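Once the two OpenRegionProcedures above complete, each region is hosted on the server the balancer picked (ports 41775 and 45255 in this run). A test or client can confirm the placement through the RegionLocator API; a minimal sketch assuming the HBase 2.x client (class name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          // Prints each region's encoded name and the region server hosting it.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
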
2024-12-10T14:58:30,532 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. service=AccessControlService 2024-12-10T14:58:30,533 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,533 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,534 INFO [StoreOpener-ab2092c60854e26deb73b20350b7a268-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,534 INFO [StoreOpener-7d77f1bda4bf89952efbc61133d5c34a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,536 INFO [StoreOpener-ab2092c60854e26deb73b20350b7a268-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ab2092c60854e26deb73b20350b7a268 columnFamilyName cf 2024-12-10T14:58:30,536 INFO [StoreOpener-7d77f1bda4bf89952efbc61133d5c34a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d77f1bda4bf89952efbc61133d5c34a columnFamilyName cf 2024-12-10T14:58:30,536 DEBUG [StoreOpener-7d77f1bda4bf89952efbc61133d5c34a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:58:30,536 DEBUG [StoreOpener-ab2092c60854e26deb73b20350b7a268-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:58:30,536 INFO [StoreOpener-7d77f1bda4bf89952efbc61133d5c34a-1 {}] regionserver.HStore(327): Store=7d77f1bda4bf89952efbc61133d5c34a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:58:30,536 INFO [StoreOpener-ab2092c60854e26deb73b20350b7a268-1 {}] regionserver.HStore(327): Store=ab2092c60854e26deb73b20350b7a268/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:58:30,537 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,537 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,537 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,537 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,539 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,539 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,541 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:58:30,541 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:58:30,541 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened ab2092c60854e26deb73b20350b7a268; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71336638, jitterRate=0.06299874186515808}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:58:30,541 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 7d77f1bda4bf89952efbc61133d5c34a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65751055, jitterRate=-0.020232930779457092}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:58:30,542 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for ab2092c60854e26deb73b20350b7a268: 2024-12-10T14:58:30,542 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 7d77f1bda4bf89952efbc61133d5c34a: 2024-12-10T14:58:30,542 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268., pid=201, masterSystemTime=1733842710529 2024-12-10T14:58:30,542 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a., pid=202, 
masterSystemTime=1733842710529 2024-12-10T14:58:30,544 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,544 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:30,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=ab2092c60854e26deb73b20350b7a268, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:58:30,544 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,544 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:30,545 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=7d77f1bda4bf89952efbc61133d5c34a, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:58:30,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-10T14:58:30,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-10T14:58:30,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, ASSIGN in 323 msec 2024-12-10T14:58:30,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a, server=db1d50717577,41775,1733842502887 in 168 msec 2024-12-10T14:58:30,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure ab2092c60854e26deb73b20350b7a268, server=db1d50717577,45255,1733842503015 in 168 msec 2024-12-10T14:58:30,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-10T14:58:30,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, ASSIGN in 323 msec 2024-12-10T14:58:30,549 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:58:30,549 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842710549"}]},"ts":"1733842710549"} 2024-12-10T14:58:30,550 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-10T14:58:30,552 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:58:30,552 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-10T14:58:30,554 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-10T14:58:30,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:30,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 378 msec 2024-12-10T14:58:30,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-10T14:58:30,800 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-10T14:58:30,800 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-10T14:58:30,800 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:58:30,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37707 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-10T14:58:30,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-10T14:58:30,807 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:58:30,807 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-10T14:58:30,809 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-10T14:58:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842710809 (current time:1733842710809). 
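The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }) is roughly what a client issues through Admin.snapshot for a flush-type snapshot; the synchronous call returns only after the SnapshotProcedure (pid=203 below) completes. A sketch, assuming the HBase 2.x client API (class name hypothetical, names taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH matches the "type=FLUSH" in the request above.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
        }
      }
    }
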
2024-12-10T14:58:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:58:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-10T14:58:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:58:30,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3187ddc6 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d5f3fd5 2024-12-10T14:58:30,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cf050ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:58:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:30,816 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3187ddc6 to 127.0.0.1:57176 2024-12-10T14:58:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:58:30,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ded25ec to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29d81413 2024-12-10T14:58:30,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f176c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:58:30,823 DEBUG [hconnection-0x1829c8bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:30,824 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:30,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:30,827 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x3ded25ec to 127.0.0.1:57176 2024-12-10T14:58:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:58:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-10T14:58:30,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-10T14:58:30,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-10T14:58:30,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-10T14:58:30,830 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:58:30,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-10T14:58:30,831 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:58:30,833 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:58:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742329_1505 (size=203) 2024-12-10T14:58:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742329_1505 (size=203) 2024-12-10T14:58:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742329_1505 (size=203) 2024-12-10T14:58:30,847 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:58:30,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268}] 2024-12-10T14:58:30,848 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:30,848 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:30,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:58:30,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-10T14:58:30,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:58:30,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:58:31,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-10T14:58:31,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-10T14:58:31,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:31,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for ab2092c60854e26deb73b20350b7a268: 2024-12-10T14:58:31,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 7d77f1bda4bf89952efbc61133d5c34a: 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:58:31,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-10T14:58:31,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742331_1507 (size=82) 2024-12-10T14:58:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742330_1506 (size=82) 2024-12-10T14:58:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742331_1507 (size=82) 2024-12-10T14:58:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742330_1506 (size=82) 2024-12-10T14:58:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742331_1507 (size=82) 2024-12-10T14:58:31,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742330_1506 (size=82) 2024-12-10T14:58:31,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 
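Both region callables above add snapshot references for zero hfiles, which is expected for an empty table that has never been flushed; the manifest carries only region-info. Once the procedure finishes (see the lines that follow), the snapshot can be checked from the client side, for example by listing snapshots. A hedged sketch assuming the HBase 2.x Admin API (class name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Prints every snapshot name with the table it was taken from.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName() + " on " + sd.getTableName());
          }
        }
      }
    }
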
2024-12-10T14:58:31,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-10T14:58:31,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:31,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-10T14:58:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-10T14:58:31,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:31,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-10T14:58:31,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:31,009 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:31,009 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:31,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a in 162 msec 2024-12-10T14:58:31,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=203 2024-12-10T14:58:31,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 in 162 msec 2024-12-10T14:58:31,011 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:58:31,012 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:58:31,012 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute 
state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:58:31,012 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,013 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742332_1508 (size=585) 2024-12-10T14:58:31,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742332_1508 (size=585) 2024-12-10T14:58:31,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742332_1508 (size=585) 2024-12-10T14:58:31,023 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:58:31,027 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:58:31,027 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,028 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:58:31,028 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-10T14:58:31,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 200 msec 2024-12-10T14:58:31,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-10T14:58:31,132 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-10T14:58:31,139 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41775 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:58:31,139 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45255 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. with WAL disabled. Data may be lost in the event of a crash. 2024-12-10T14:58:31,142 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,142 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:31,142 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T14:58:31,153 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-10T14:58:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733842711153 (current time:1733842711153). 2024-12-10T14:58:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-10T14:58:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-10T14:58:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-10T14:58:31,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25067f22 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@689084f5 2024-12-10T14:58:31,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fe1787b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:58:31,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:31,159 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x25067f22 to 127.0.0.1:57176 2024-12-10T14:58:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:58:31,161 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29ba4f95 to 127.0.0.1:57176 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45e1d17b 2024-12-10T14:58:31,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1874b1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:58:31,168 DEBUG [hconnection-0x20b48156-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:31,169 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37030, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:31,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:58:31,172 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:58:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29ba4f95 to 127.0.0.1:57176 2024-12-10T14:58:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:58:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-10T14:58:31,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
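[editor's note] The two "writing data ... with WAL disabled. Data may be lost in the event of a crash" warnings a few entries above come from the test loading rows into both regions just before the second snapshot is requested. For reference, a client write that skips the WAL looks roughly like the following sketch; the table name and column family ('cf', qualifier 'q') come from the log, while the row key, value, and connection setup are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key, not from the log
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what makes the region server emit the
          // "Data may be lost in the event of a crash" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }

Skipping the WAL trades durability for write throughput, which is acceptable here only because the data is test fixture data that the snapshot immediately flushes to hfiles anyway.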
2024-12-10T14:58:31,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-10T14:58:31,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-10T14:58:31,175 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-10T14:58:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-10T14:58:31,175 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-10T14:58:31,177 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-10T14:58:31,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742333_1509 (size=198) 2024-12-10T14:58:31,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742333_1509 (size=198) 2024-12-10T14:58:31,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742333_1509 (size=198) 2024-12-10T14:58:31,192 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-10T14:58:31,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268}] 2024-12-10T14:58:31,193 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:31,193 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a 
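[editor's note] At this point the master has registered SnapshotProcedure pid=206 for a FLUSH-type snapshot and fanned it out into one SnapshotRegionProcedure per region (pid=207 and pid=208). On the client side this whole sequence is typically triggered by a single Admin.snapshot() call; a minimal sketch follows, in which only the snapshot and table names are taken from the log and everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master reports the snapshot procedure as finished,
          // which corresponds to the later "Operation: SNAPSHOT ... procId: 206 completed" line.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }

For an enabled table this produces the FLUSH-type snapshot shown in the procedure state lines (type=FLUSH ttl=0).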
2024-12-10T14:58:31,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-10T14:58:31,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:58:31,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:58:31,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45255 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-10T14:58:31,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41775 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-10T14:58:31,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:31,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:31,345 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 7d77f1bda4bf89952efbc61133d5c34a 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-10T14:58:31,345 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing ab2092c60854e26deb73b20350b7a268 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-10T14:58:31,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/.tmp/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 is 71, key is 110aa9208022a5008abfe394f13dc6f7/cf:q/1733842711139/Put/seqid=0 2024-12-10T14:58:31,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/.tmp/cf/188dcc82443e499ea500bd16fbd8822b is 71, key is 05628babf32133d26ea97ae890e14186/cf:q/1733842711138/Put/seqid=0 2024-12-10T14:58:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742334_1510 (size=8256) 2024-12-10T14:58:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742334_1510 (size=8256) 2024-12-10T14:58:31,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742334_1510 (size=8256) 2024-12-10T14:58:31,375 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/.tmp/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 2024-12-10T14:58:31,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/.tmp/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 2024-12-10T14:58:31,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742335_1511 (size=5356) 2024-12-10T14:58:31,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742335_1511 (size=5356) 2024-12-10T14:58:31,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742335_1511 (size=5356) 2024-12-10T14:58:31,386 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/.tmp/cf/188dcc82443e499ea500bd16fbd8822b 2024-12-10T14:58:31,388 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21, entries=46, sequenceid=6, filesize=8.1 K 2024-12-10T14:58:31,389 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ab2092c60854e26deb73b20350b7a268 in 44ms, sequenceid=6, compaction requested=false 2024-12-10T14:58:31,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-10T14:58:31,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for ab2092c60854e26deb73b20350b7a268: 2024-12-10T14:58:31,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 
for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-10T14:58:31,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:58:31,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21] hfiles 2024-12-10T14:58:31,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/.tmp/cf/188dcc82443e499ea500bd16fbd8822b as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b 2024-12-10T14:58:31,400 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b, entries=4, sequenceid=6, filesize=5.2 K 2024-12-10T14:58:31,401 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 7d77f1bda4bf89952efbc61133d5c34a in 56ms, sequenceid=6, compaction requested=false 2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 7d77f1bda4bf89952efbc61133d5c34a: 2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
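[editor's note] Because the snapshot type is FLUSH, each SnapshotRegionProcedure above first flushes its region's memstore to a new hfile (d3026af5f2eb4bc280b1d6fa1bfc2a21 and 188dcc82443e499ea500bd16fbd8822b) and only then records references to those files in the snapshot manifest. The same memstore-to-hfile flush can also be requested explicitly through the Admin API; a minimal sketch, assuming the table name from the log and default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces memstore contents out to hfiles for every region of the table,
          // the same flush the snapshot procedure performs per region before
          // adding hfile references to the manifest.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }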
2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b] hfiles 2024-12-10T14:58:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742336_1512 (size=121) 2024-12-10T14:58:31,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742336_1512 (size=121) 2024-12-10T14:58:31,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742336_1512 (size=121) 2024-12-10T14:58:31,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 
2024-12-10T14:58:31,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-10T14:58:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-10T14:58:31,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:31,411 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:31,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure ab2092c60854e26deb73b20350b7a268 in 219 msec 2024-12-10T14:58:31,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742337_1513 (size=121) 2024-12-10T14:58:31,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742337_1513 (size=121) 2024-12-10T14:58:31,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742337_1513 (size=121) 2024-12-10T14:58:31,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 
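[editor's note] Both region callables have now flushed, written their region manifests, and closed; the master-side consolidation, verification, and completion steps follow below. From a client, the finished snapshots can be confirmed with Admin.listSnapshots(); a small sketch, where only the expected snapshot names come from the log:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription sd : snapshots) {
            // Expect both emptySnaptb0-testExportFileSystemStateWithSkipTmp and
            // snaptb0-testExportFileSystemStateWithSkipTmp to show up here.
            System.out.println(sd.getName() + " on table " + sd.getTableName());
          }
        }
      }
    }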
2024-12-10T14:58:31,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-10T14:58:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-10T14:58:31,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:31,420 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:31,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-10T14:58:31,422 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-10T14:58:31,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a in 229 msec 2024-12-10T14:58:31,423 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-10T14:58:31,423 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-10T14:58:31,423 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,424 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742338_1514 (size=663) 2024-12-10T14:58:31,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742338_1514 (size=663) 2024-12-10T14:58:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742338_1514 (size=663) 2024-12-10T14:58:31,440 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-10T14:58:31,446 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-10T14:58:31,446 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,447 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-10T14:58:31,447 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-10T14:58:31,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 275 msec 2024-12-10T14:58:31,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-10T14:58:31,477 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-10T14:58:31,477 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477 2024-12-10T14:58:31,477 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:36449, tgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477, rawTgtDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477, srcFsUri=hdfs://localhost:36449, srcDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:31,522 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:36449, inputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef 2024-12-10T14:58:31,523 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,524 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-10T14:58:31,529 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:31,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742339_1515 (size=198) 2024-12-10T14:58:31,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742339_1515 (size=198) 2024-12-10T14:58:31,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742339_1515 (size=198) 2024-12-10T14:58:31,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742340_1516 (size=663) 2024-12-10T14:58:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742340_1516 (size=663) 2024-12-10T14:58:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742340_1516 (size=663) 2024-12-10T14:58:31,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:31,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:31,563 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:31,563 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:32,593 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-10T14:58:32,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-10T14:58:32,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-10827794268050576338.jar 2024-12-10T14:58:32,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop-16863009840775270673.jar 2024-12-10T14:58:32,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-10T14:58:32,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-10T14:58:32,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-10T14:58:32,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-10T14:58:32,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-10T14:58:32,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-10T14:58:32,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-10T14:58:32,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-10T14:58:32,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-10T14:58:32,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-10T14:58:32,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-10T14:58:32,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-10T14:58:32,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-10T14:58:32,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:32,769 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:32,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:32,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:32,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-10T14:58:32,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:32,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-10T14:58:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742341_1517 (size=127628) 2024-12-10T14:58:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742341_1517 (size=127628) 2024-12-10T14:58:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742341_1517 (size=127628) 2024-12-10T14:58:32,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742342_1518 (size=2172101) 2024-12-10T14:58:32,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742342_1518 (size=2172101) 2024-12-10T14:58:32,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742342_1518 (size=2172101) 2024-12-10T14:58:32,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742343_1519 (size=6350861) 2024-12-10T14:58:32,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742343_1519 (size=6350861) 2024-12-10T14:58:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742343_1519 (size=6350861) 2024-12-10T14:58:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is 
added to blk_1073742344_1520 (size=213228) 2024-12-10T14:58:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742344_1520 (size=213228) 2024-12-10T14:58:32,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742344_1520 (size=213228) 2024-12-10T14:58:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742345_1521 (size=1877034) 2024-12-10T14:58:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742345_1521 (size=1877034) 2024-12-10T14:58:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742345_1521 (size=1877034) 2024-12-10T14:58:32,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742346_1522 (size=533455) 2024-12-10T14:58:32,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742346_1522 (size=533455) 2024-12-10T14:58:32,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742346_1522 (size=533455) 2024-12-10T14:58:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742347_1523 (size=7280644) 2024-12-10T14:58:32,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742347_1523 (size=7280644) 2024-12-10T14:58:32,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742347_1523 (size=7280644) 2024-12-10T14:58:33,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742348_1524 (size=4188619) 2024-12-10T14:58:33,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742348_1524 (size=4188619) 2024-12-10T14:58:33,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742348_1524 (size=4188619) 2024-12-10T14:58:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742349_1525 (size=20406) 2024-12-10T14:58:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742349_1525 (size=20406) 2024-12-10T14:58:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742349_1525 (size=20406) 2024-12-10T14:58:33,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742350_1526 (size=75495) 2024-12-10T14:58:33,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742350_1526 (size=75495) 2024-12-10T14:58:33,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39383 is added to blk_1073742350_1526 (size=75495) 2024-12-10T14:58:33,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742351_1527 (size=45609) 2024-12-10T14:58:33,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742351_1527 (size=45609) 2024-12-10T14:58:33,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742351_1527 (size=45609) 2024-12-10T14:58:33,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742352_1528 (size=110084) 2024-12-10T14:58:33,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742352_1528 (size=110084) 2024-12-10T14:58:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742352_1528 (size=110084) 2024-12-10T14:58:33,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742353_1529 (size=1323991) 2024-12-10T14:58:33,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742353_1529 (size=1323991) 2024-12-10T14:58:33,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742353_1529 (size=1323991) 2024-12-10T14:58:33,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742354_1530 (size=23076) 2024-12-10T14:58:33,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742354_1530 (size=23076) 2024-12-10T14:58:33,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742354_1530 (size=23076) 2024-12-10T14:58:33,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742355_1531 (size=451756) 2024-12-10T14:58:33,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742355_1531 (size=451756) 2024-12-10T14:58:33,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742355_1531 (size=451756) 2024-12-10T14:58:33,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742356_1532 (size=126803) 2024-12-10T14:58:33,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742356_1532 (size=126803) 2024-12-10T14:58:33,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742356_1532 (size=126803) 2024-12-10T14:58:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742357_1533 (size=322274) 2024-12-10T14:58:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34057 is added to blk_1073742357_1533 (size=322274) 2024-12-10T14:58:33,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742357_1533 (size=322274) 2024-12-10T14:58:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742358_1534 (size=1832290) 2024-12-10T14:58:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742358_1534 (size=1832290) 2024-12-10T14:58:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742358_1534 (size=1832290) 2024-12-10T14:58:33,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742359_1535 (size=30081) 2024-12-10T14:58:33,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742359_1535 (size=30081) 2024-12-10T14:58:33,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742359_1535 (size=30081) 2024-12-10T14:58:33,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742360_1536 (size=53616) 2024-12-10T14:58:33,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742360_1536 (size=53616) 2024-12-10T14:58:33,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742360_1536 (size=53616) 2024-12-10T14:58:33,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742361_1537 (size=29229) 2024-12-10T14:58:33,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742361_1537 (size=29229) 2024-12-10T14:58:33,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742361_1537 (size=29229) 2024-12-10T14:58:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742362_1538 (size=169089) 2024-12-10T14:58:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742362_1538 (size=169089) 2024-12-10T14:58:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742362_1538 (size=169089) 2024-12-10T14:58:33,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742363_1539 (size=5175431) 2024-12-10T14:58:33,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742363_1539 (size=5175431) 2024-12-10T14:58:33,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742363_1539 (size=5175431) 2024-12-10T14:58:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742364_1540 (size=136454) 2024-12-10T14:58:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742364_1540 (size=136454) 2024-12-10T14:58:33,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742364_1540 (size=136454) 2024-12-10T14:58:33,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742365_1541 (size=907858) 2024-12-10T14:58:33,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742365_1541 (size=907858) 2024-12-10T14:58:33,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742365_1541 (size=907858) 2024-12-10T14:58:33,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742366_1542 (size=3317408) 2024-12-10T14:58:33,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742366_1542 (size=3317408) 2024-12-10T14:58:33,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742366_1542 (size=3317408) 2024-12-10T14:58:33,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742367_1543 (size=503880) 2024-12-10T14:58:33,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742367_1543 (size=503880) 2024-12-10T14:58:33,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742367_1543 (size=503880) 2024-12-10T14:58:33,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742368_1544 (size=4695811) 2024-12-10T14:58:33,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742368_1544 (size=4695811) 2024-12-10T14:58:33,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742368_1544 (size=4695811) 2024-12-10T14:58:33,237 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-10T14:58:33,239 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-10T14:58:33,241 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-10T14:58:33,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742369_1545 (size=366) 2024-12-10T14:58:33,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742369_1545 (size=366) 2024-12-10T14:58:33,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742369_1545 (size=366) 2024-12-10T14:58:33,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742370_1546 (size=15) 2024-12-10T14:58:33,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742370_1546 (size=15) 2024-12-10T14:58:33,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742370_1546 (size=15) 2024-12-10T14:58:33,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742371_1547 (size=305057) 2024-12-10T14:58:33,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742371_1547 (size=305057) 2024-12-10T14:58:33,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742371_1547 (size=305057) 2024-12-10T14:58:34,354 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-10T14:58:34,354 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-10T14:58:34,357 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0009_000001 (auth:SIMPLE) from 127.0.0.1:59116 2024-12-10T14:58:34,369 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000001/launch_container.sh] 2024-12-10T14:58:34,369 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000001/container_tokens] 2024-12-10T14:58:34,369 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-1_0/usercache/jenkins/appcache/application_1733842510486_0009/container_1733842510486_0009_01_000001/sysfs] 2024-12-10T14:58:34,561 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0010_000001 (auth:SIMPLE) from 127.0.0.1:33100 2024-12-10T14:58:35,408 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:58:39,514 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0010_000001 (auth:SIMPLE) from 127.0.0.1:52736 2024-12-10T14:58:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742372_1548 (size=350755) 2024-12-10T14:58:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742372_1548 (size=350755) 2024-12-10T14:58:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742372_1548 (size=350755) 2024-12-10T14:58:41,816 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0010_000001 (auth:SIMPLE) from 127.0.0.1:44184 2024-12-10T14:58:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742373_1549 (size=8256) 2024-12-10T14:58:45,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742373_1549 (size=8256) 2024-12-10T14:58:45,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742373_1549 (size=8256) 
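For orientation only: the entries above (snapshot.ExportSnapshot loading the 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list, then the MiniMRCluster authenticating the resulting application attempts) are the export step of the test. Below is a minimal sketch of how such an export is typically driven, assuming an HBase 2.x classpath where ExportSnapshot extends AbstractHBaseTool and is therefore runnable through Hadoop's ToolRunner; the destination URI and argument values are illustrative, not taken from this run or from the test source.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Roughly equivalent to the CLI form:
      //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
      //     -snapshot snaptb0-testExportFileSystemStateWithSkipTmp -copy-to <target fs>
      int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
          "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
          "-copy-to", "hdfs://localhost:36449/user/jenkins/export-target"  // illustrative destination, not the path used above
      });
      System.exit(rc);
    }
  }
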
2024-12-10T14:58:45,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742374_1550 (size=5356) 2024-12-10T14:58:45,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742374_1550 (size=5356) 2024-12-10T14:58:45,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742374_1550 (size=5356) 2024-12-10T14:58:45,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742375_1551 (size=17455) 2024-12-10T14:58:45,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742375_1551 (size=17455) 2024-12-10T14:58:45,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742375_1551 (size=17455) 2024-12-10T14:58:45,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742376_1552 (size=476) 2024-12-10T14:58:45,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742376_1552 (size=476) 2024-12-10T14:58:45,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742376_1552 (size=476) 2024-12-10T14:58:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742377_1553 (size=17455) 2024-12-10T14:58:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742377_1553 (size=17455) 2024-12-10T14:58:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742377_1553 (size=17455) 2024-12-10T14:58:45,343 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000002/launch_container.sh] 2024-12-10T14:58:45,343 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000002/container_tokens] 2024-12-10T14:58:45,343 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_0/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000002/sysfs] 2024-12-10T14:58:45,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742378_1554 (size=350755) 2024-12-10T14:58:45,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742378_1554 (size=350755) 2024-12-10T14:58:45,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742378_1554 (size=350755) 2024-12-10T14:58:45,375 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0010_000001 (auth:SIMPLE) from 127.0.0.1:44186 2024-12-10T14:58:46,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-10T14:58:46,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-10T14:58:46,630 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,631 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-10T14:58:46,631 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-10T14:58:46,631 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,631 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-10T14:58:46,631 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-10T14:58:46,631 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_996159037_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,632 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-10T14:58:46,632 DEBUG 
[Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/export-test/export-1733842711477/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-10T14:58:46,636 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-10T14:58:46,639 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842726639"}]},"ts":"1733842726639"} 2024-12-10T14:58:46,640 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-10T14:58:46,642 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-10T14:58:46,642 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-10T14:58:46,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, UNASSIGN}] 2024-12-10T14:58:46,644 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, UNASSIGN 2024-12-10T14:58:46,644 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, UNASSIGN 2024-12-10T14:58:46,644 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=7d77f1bda4bf89952efbc61133d5c34a, regionState=CLOSING, regionLocation=db1d50717577,41775,1733842502887 2024-12-10T14:58:46,645 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=ab2092c60854e26deb73b20350b7a268, regionState=CLOSING, regionLocation=db1d50717577,45255,1733842503015 2024-12-10T14:58:46,645 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:58:46,645 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE; CloseRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a, server=db1d50717577,41775,1733842502887}] 2024-12-10T14:58:46,646 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:58:46,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE; CloseRegionProcedure ab2092c60854e26deb73b20350b7a268, server=db1d50717577,45255,1733842503015}] 2024-12-10T14:58:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-10T14:58:46,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,41775,1733842502887 2024-12-10T14:58:46,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,45255,1733842503015 2024-12-10T14:58:46,797 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:46,797 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:46,797 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:58:46,797 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 7d77f1bda4bf89952efbc61133d5c34a, disabling compactions & flushes 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing ab2092c60854e26deb73b20350b7a268, disabling compactions & flushes 2024-12-10T14:58:46,798 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:46,798 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 
2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. after waiting 0 ms 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. after waiting 0 ms 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:46,798 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:58:46,802 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268. 2024-12-10T14:58:46,802 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a. 
2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for ab2092c60854e26deb73b20350b7a268: 2024-12-10T14:58:46,802 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 7d77f1bda4bf89952efbc61133d5c34a: 2024-12-10T14:58:46,803 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:46,804 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=7d77f1bda4bf89952efbc61133d5c34a, regionState=CLOSED 2024-12-10T14:58:46,804 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:46,804 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=ab2092c60854e26deb73b20350b7a268, regionState=CLOSED 2024-12-10T14:58:46,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=211 2024-12-10T14:58:46,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=211, state=SUCCESS; CloseRegionProcedure 7d77f1bda4bf89952efbc61133d5c34a, server=db1d50717577,41775,1733842502887 in 160 msec 2024-12-10T14:58:46,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=212 2024-12-10T14:58:46,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=212, state=SUCCESS; CloseRegionProcedure ab2092c60854e26deb73b20350b7a268, server=db1d50717577,45255,1733842503015 in 159 msec 2024-12-10T14:58:46,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=7d77f1bda4bf89952efbc61133d5c34a, UNASSIGN in 163 msec 2024-12-10T14:58:46,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=212, resume processing ppid=210 2024-12-10T14:58:46,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ab2092c60854e26deb73b20350b7a268, UNASSIGN in 163 msec 2024-12-10T14:58:46,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-10T14:58:46,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 166 msec 2024-12-10T14:58:46,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733842726810"}]},"ts":"1733842726810"} 2024-12-10T14:58:46,811 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-10T14:58:46,813 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-10T14:58:46,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 177 msec 2024-12-10T14:58:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-10T14:58:46,940 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-10T14:58:46,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,942 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,943 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41775 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,945 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:46,945 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:46,947 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/recovered.edits] 2024-12-10T14:58:46,947 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf, FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/recovered.edits] 2024-12-10T14:58:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,949 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-10T14:58:46,949 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-10T14:58:46,949 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-10T14:58:46,949 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:46,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-10T14:58:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-10T14:58:46,951 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:46,951 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:46,951 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:46,951 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-10T14:58:46,951 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/cf/d3026af5f2eb4bc280b1d6fa1bfc2a21 2024-12-10T14:58:46,952 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/cf/188dcc82443e499ea500bd16fbd8822b 2024-12-10T14:58:46,954 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268/recovered.edits/9.seqid 2024-12-10T14:58:46,955 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/recovered.edits/9.seqid to hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a/recovered.edits/9.seqid 2024-12-10T14:58:46,955 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/ab2092c60854e26deb73b20350b7a268 2024-12-10T14:58:46,955 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testtb-testExportFileSystemStateWithSkipTmp/7d77f1bda4bf89952efbc61133d5c34a 2024-12-10T14:58:46,955 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-10T14:58:46,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,958 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-10T14:58:46,960 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-10T14:58:46,961 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,961 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-10T14:58:46,961 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842726961"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:46,961 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733842726961"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:46,963 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-10T14:58:46,963 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7d77f1bda4bf89952efbc61133d5c34a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733842710194.7d77f1bda4bf89952efbc61133d5c34a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ab2092c60854e26deb73b20350b7a268, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733842710194.ab2092c60854e26deb73b20350b7a268.', STARTKEY => '1', ENDKEY => ''}] 2024-12-10T14:58:46,963 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
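The DisableTableProcedure (pid=209: close and unassign both regions) and DeleteTableProcedure (pid=215: archive the HFiles, remove the rows from hbase:meta) logged above are what the test triggers through the client Admin API during cleanup. A minimal client-side sketch of that sequence follows; it is not taken from the test source, the connection setup is generic, and only the table and snapshot names are the ones appearing in this log.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class TableCleanupSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        // Schedules a DisableTableProcedure on the master (pid=209 above).
        admin.disableTable(table);
        // Schedules a DeleteTableProcedure (pid=215 above): regions are archived, then deleted from hbase:meta.
        admin.deleteTable(table);
        // Finally the test drops its snapshots, as in the "delete name: ..." entries that follow in the log.
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
        admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
      }
    }
  }
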
2024-12-10T14:58:46,963 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733842726963"}]},"ts":"9223372036854775807"} 2024-12-10T14:58:46,964 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-10T14:58:46,966 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:46,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 25 msec 2024-12-10T14:58:47,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-10T14:58:47,051 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-10T14:58:47,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-10T14:58:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:47,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-10T14:58:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:47,080 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=815 (was 812) Potentially hanging thread: hconnection-0x50bdc134-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 15018) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-355840034_1 at /127.0.0.1:48160 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-7502
    java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
    java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
    java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
    java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
    java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
    java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
    java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
    java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
    app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: hconnection-0x50bdc134-shared-pool-49
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-23
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:45723 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: hconnection-0x50bdc134-shared-pool-50
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45723
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:41744 [Waiting for operation #6]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-355840034_1 at /127.0.0.1:58540 [Waiting for operation #2]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:48176 [Waiting for operation #3]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_996159037_22 at /127.0.0.1:58556 [Waiting for operation #3]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x50bdc134-shared-pool-52
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=809 (was 810), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=367 (was 369), ProcessCount=17 (was 17), AvailableMemoryMB=603 (was 561) - AvailableMemoryMB LEAK? -
2024-12-10T14:58:47,080 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=815 is superior to 500 2024-12-10T14:58:47,080 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 2024-12-10T14:58:47,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fdaa78e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-10T14:58:47,089 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31762410{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:58:47,089 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:58:47,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5946649e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-10T14:58:47,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68541e95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED} 2024-12-10T14:58:49,335 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 32558485cb37ace42cbc3671b13298ba, had cached 0 bytes from a total of 8394 2024-12-10T14:58:49,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3d3c5d37b59f3a27bd963f36e8591d10, had cached 0 bytes from a total of 5216 2024-12-10T14:58:51,476 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733842510486_0010_000001 (auth:SIMPLE) from 127.0.0.1:52736 2024-12-10T14:58:51,489 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000001/launch_container.sh] 2024-12-10T14:58:51,489 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000001/container_tokens] 2024-12-10T14:58:51,489 WARN
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/MiniMRCluster_134286943/yarn-6926827336/MiniMRCluster_134286943-localDir-nm-0_2/usercache/jenkins/appcache/application_1733842510486_0010/container_1733842510486_0010_01_000001/sysfs] 2024-12-10T14:58:52,324 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:58:52,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-10T14:58:58,094 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:59:00,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:59:04,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ca3f0e8{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-10T14:59:04,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17f11178{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:59:04,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:59:04,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f4563e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-10T14:59:04,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3604227f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED} 2024-12-10T14:59:21,109 ERROR [Thread[Thread-417,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-10T14:59:21,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c9b4752{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-10T14:59:21,110 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f5514d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:59:21,110 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:59:21,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@1fe36ded{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-10T14:59:21,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74dfb21{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED} 2024-12-10T14:59:21,114 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-10T14:59:21,120 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-10T14:59:21,120 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-10T14:59:21,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741830_1006 (size=946893) 2024-12-10T14:59:21,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741830_1006 (size=946893) 2024-12-10T14:59:21,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741830_1006 (size=946893) 2024-12-10T14:59:21,127 ERROR [Thread[Thread-440,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-10T14:59:21,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71eb319{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-10T14:59:21,130 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c6d1600{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:59:21,130 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:59:21,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49ebb85c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-10T14:59:21,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@136ae407{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED} 2024-12-10T14:59:21,132 ERROR [Thread[Thread-399,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-10T14:59:21,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-10T14:59:21,132 INFO 
[Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-10T14:59:21,132 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:59:21,132 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e74344 to 127.0.0.1:57176 2024-12-10T14:59:21,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,132 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T14:59:21,132 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1219340791, stopped=false 2024-12-10T14:59:21,133 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,133 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-10T14:59:21,133 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=db1d50717577,36533,1733842502047 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T14:59:21,134 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:59:21,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:59:21,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:59:21,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:59:21,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:59:21,135 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,41775,1733842502887' ***** 2024-12-10T14:59:21,135 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,135 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T14:59:21,136 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,37707,1733842502956' ***** 2024-12-10T14:59:21,136 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,136 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T14:59:21,136 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,45255,1733842503015' ***** 2024-12-10T14:59:21,136 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,136 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T14:59:21,136 INFO [RS:0;db1d50717577:41775 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T14:59:21,136 INFO [RS:1;db1d50717577:37707 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T14:59:21,136 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T14:59:21,136 INFO [RS:1;db1d50717577:37707 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T14:59:21,136 INFO [RS:0;db1d50717577:41775 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T14:59:21,136 INFO [RS:0;db1d50717577:41775 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T14:59:21,136 INFO [RS:2;db1d50717577:45255 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T14:59:21,136 INFO [RS:2;db1d50717577:45255 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T14:59:21,136 INFO [RS:2;db1d50717577:45255 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-10T14:59:21,136 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T14:59:21,136 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T14:59:21,137 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(3579): Received CLOSE for 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:59:21,137 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(3579): Received CLOSE for 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(3579): Received CLOSE for 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:59:21,137 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,45255,1733842503015 2024-12-10T14:59:21,137 DEBUG [RS:2;db1d50717577:45255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,37707,1733842502956 2024-12-10T14:59:21,137 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(3579): Received CLOSE for 3d3c5d37b59f3a27bd963f36e8591d10 2024-12-10T14:59:21,137 DEBUG [RS:1;db1d50717577:37707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,137 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,41775,1733842502887 2024-12-10T14:59:21,137 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-10T14:59:21,137 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1603): Online Regions={763faff5dd874fa35c8dd37f5437c5e6=hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6.} 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T14:59:21,137 DEBUG [RS:0;db1d50717577:41775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-10T14:59:21,137 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-10T14:59:21,137 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-10T14:59:21,137 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1603): Online Regions={99bb2446a6b84f36bf016b19fe4722e6=hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6., 3d3c5d37b59f3a27bd963f36e8591d10=testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10.} 2024-12-10T14:59:21,138 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-10T14:59:21,138 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 32558485cb37ace42cbc3671b13298ba=testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba.} 2024-12-10T14:59:21,139 INFO [regionserver/db1d50717577:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T14:59:21,139 INFO [regionserver/db1d50717577:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T14:59:21,140 INFO [regionserver/db1d50717577:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,140 INFO [regionserver/db1d50717577:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 99bb2446a6b84f36bf016b19fe4722e6, disabling compactions & flushes 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 763faff5dd874fa35c8dd37f5437c5e6, disabling compactions & flushes 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T14:59:21,141 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T14:59:21,141 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:59:21,141 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 
2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. after waiting 0 ms 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. after waiting 0 ms 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:59:21,141 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 763faff5dd874fa35c8dd37f5437c5e6 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-10T14:59:21,141 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-10T14:59:21,141 DEBUG [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 32558485cb37ace42cbc3671b13298ba 2024-12-10T14:59:21,141 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 99bb2446a6b84f36bf016b19fe4722e6 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-10T14:59:21,141 DEBUG [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1629): Waiting on 763faff5dd874fa35c8dd37f5437c5e6 2024-12-10T14:59:21,141 DEBUG [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1629): Waiting on 3d3c5d37b59f3a27bd963f36e8591d10, 99bb2446a6b84f36bf016b19fe4722e6 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 32558485cb37ace42cbc3671b13298ba, disabling compactions & flushes 2024-12-10T14:59:21,141 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 
after waiting 0 ms 2024-12-10T14:59:21,141 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:59:21,155 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/32558485cb37ace42cbc3671b13298ba/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:59:21,156 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,156 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:59:21,156 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 32558485cb37ace42cbc3671b13298ba: 2024-12-10T14:59:21,156 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba. 2024-12-10T14:59:21,158 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/.tmp/info/d12061cbc9a0445b8b605361752757e1 is 45, key is default/info:d/1733842506555/Put/seqid=0 2024-12-10T14:59:21,158 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/.tmp/l/ccac384646bb48a4896b75d05d7a9da6 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733842636615/DeleteFamily/seqid=0 2024-12-10T14:59:21,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742379_1555 (size=5037) 2024-12-10T14:59:21,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742379_1555 (size=5037) 2024-12-10T14:59:21,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742379_1555 (size=5037) 2024-12-10T14:59:21,165 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/.tmp/info/d12061cbc9a0445b8b605361752757e1 2024-12-10T14:59:21,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742380_1556 (size=5695) 2024-12-10T14:59:21,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to 
blk_1073742380_1556 (size=5695) 2024-12-10T14:59:21,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742380_1556 (size=5695) 2024-12-10T14:59:21,168 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/.tmp/l/ccac384646bb48a4896b75d05d7a9da6 2024-12-10T14:59:21,169 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/info/d27f94cabdac414e9474b9dc0f921c9e is 173, key is testExportExpiredSnapshot,1,1733842638953.32558485cb37ace42cbc3671b13298ba./info:regioninfo/1733842639350/Put/seqid=0 2024-12-10T14:59:21,173 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ccac384646bb48a4896b75d05d7a9da6 2024-12-10T14:59:21,173 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/.tmp/l/ccac384646bb48a4896b75d05d7a9da6 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/l/ccac384646bb48a4896b75d05d7a9da6 2024-12-10T14:59:21,174 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/.tmp/info/d12061cbc9a0445b8b605361752757e1 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/info/d12061cbc9a0445b8b605361752757e1 2024-12-10T14:59:21,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742381_1557 (size=15630) 2024-12-10T14:59:21,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742381_1557 (size=15630) 2024-12-10T14:59:21,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742381_1557 (size=15630) 2024-12-10T14:59:21,175 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/info/d27f94cabdac414e9474b9dc0f921c9e 2024-12-10T14:59:21,178 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/info/d12061cbc9a0445b8b605361752757e1, entries=2, 
sequenceid=6, filesize=4.9 K 2024-12-10T14:59:21,178 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ccac384646bb48a4896b75d05d7a9da6 2024-12-10T14:59:21,178 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/l/ccac384646bb48a4896b75d05d7a9da6, entries=12, sequenceid=27, filesize=5.6 K 2024-12-10T14:59:21,179 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 763faff5dd874fa35c8dd37f5437c5e6 in 38ms, sequenceid=6, compaction requested=false 2024-12-10T14:59:21,179 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 99bb2446a6b84f36bf016b19fe4722e6 in 38ms, sequenceid=27, compaction requested=false 2024-12-10T14:59:21,182 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/namespace/763faff5dd874fa35c8dd37f5437c5e6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/acl/99bb2446a6b84f36bf016b19fe4722e6/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,183 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 763faff5dd874fa35c8dd37f5437c5e6: 2024-12-10T14:59:21,183 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 99bb2446a6b84f36bf016b19fe4722e6: 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733842505989.763faff5dd874fa35c8dd37f5437c5e6. 
2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733842506693.99bb2446a6b84f36bf016b19fe4722e6. 2024-12-10T14:59:21,183 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3d3c5d37b59f3a27bd963f36e8591d10, disabling compactions & flushes 2024-12-10T14:59:21,183 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:59:21,184 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:59:21,184 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. after waiting 0 ms 2024-12-10T14:59:21,184 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:59:21,190 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/default/testExportExpiredSnapshot/3d3c5d37b59f3a27bd963f36e8591d10/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T14:59:21,191 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,191 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 2024-12-10T14:59:21,191 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3d3c5d37b59f3a27bd963f36e8591d10: 2024-12-10T14:59:21,191 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733842638953.3d3c5d37b59f3a27bd963f36e8591d10. 
2024-12-10T14:59:21,193 INFO [regionserver/db1d50717577:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,195 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/rep_barrier/91b55ef21de74a72812daa8b3c365b86 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9./rep_barrier:/1733842636639/DeleteFamily/seqid=0 2024-12-10T14:59:21,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742382_1558 (size=8007) 2024-12-10T14:59:21,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742382_1558 (size=8007) 2024-12-10T14:59:21,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742382_1558 (size=8007) 2024-12-10T14:59:21,200 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/rep_barrier/91b55ef21de74a72812daa8b3c365b86 2024-12-10T14:59:21,226 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/table/81244c479e884ee9982d74273ba83814 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733842618177.7989d98b9d2c3897b7c4cfb24cffcca9./table:/1733842636639/DeleteFamily/seqid=0 2024-12-10T14:59:21,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073742383_1559 (size=8861) 2024-12-10T14:59:21,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073742383_1559 (size=8861) 2024-12-10T14:59:21,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073742383_1559 (size=8861) 2024-12-10T14:59:21,232 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/table/81244c479e884ee9982d74273ba83814 2024-12-10T14:59:21,237 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/info/d27f94cabdac414e9474b9dc0f921c9e as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/info/d27f94cabdac414e9474b9dc0f921c9e 2024-12-10T14:59:21,240 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/info/d27f94cabdac414e9474b9dc0f921c9e, entries=84, sequenceid=202, filesize=15.3 K 2024-12-10T14:59:21,241 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/rep_barrier/91b55ef21de74a72812daa8b3c365b86 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/rep_barrier/91b55ef21de74a72812daa8b3c365b86 2024-12-10T14:59:21,245 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/rep_barrier/91b55ef21de74a72812daa8b3c365b86, entries=21, sequenceid=202, filesize=7.8 K 2024-12-10T14:59:21,245 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/.tmp/table/81244c479e884ee9982d74273ba83814 as hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/table/81244c479e884ee9982d74273ba83814 2024-12-10T14:59:21,249 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/table/81244c479e884ee9982d74273ba83814, entries=38, sequenceid=202, filesize=8.7 K 2024-12-10T14:59:21,250 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=202, compaction requested=false 2024-12-10T14:59:21,253 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-10T14:59:21,253 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:21,253 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T14:59:21,253 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T14:59:21,253 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T14:59:21,253 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T14:59:21,341 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,37707,1733842502956; all regions closed. 
2024-12-10T14:59:21,341 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,41775,1733842502887; all regions closed. 2024-12-10T14:59:21,341 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,45255,1733842503015; all regions closed. 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741833_1009 (size=17071) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741833_1009 (size=17071) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741836_1012 (size=80694) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741836_1012 (size=80694) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741836_1012 (size=80694) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741833_1009 (size=17071) 2024-12-10T14:59:21,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741835_1011 (size=10830) 2024-12-10T14:59:21,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741835_1011 (size=10830) 2024-12-10T14:59:21,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741835_1011 (size=10830) 2024-12-10T14:59:21,349 DEBUG [RS:2;db1d50717577:45255 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs 2024-12-10T14:59:21,349 INFO [RS:2;db1d50717577:45255 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C45255%2C1733842503015:(num 1733842505256) 2024-12-10T14:59:21,349 DEBUG [RS:2;db1d50717577:45255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,349 DEBUG [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs 2024-12-10T14:59:21,349 INFO [RS:2;db1d50717577:45255 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,349 INFO [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C37707%2C1733842502956.meta:.meta(num 1733842505737) 2024-12-10T14:59:21,349 DEBUG [RS:0;db1d50717577:41775 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C41775%2C1733842502887:(num 1733842505252) 2024-12-10T14:59:21,349 DEBUG [RS:0;db1d50717577:41775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,349 INFO [RS:2;db1d50717577:45255 {}] hbase.ChoreService(370): Chore service for: regionserver/db1d50717577:0 had [ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] hbase.ChoreService(370): Chore service for: regionserver/db1d50717577:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T14:59:21,349 INFO [RS:2;db1d50717577:45255 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T14:59:21,349 INFO [RS:2;db1d50717577:45255 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T14:59:21,349 INFO [RS:0;db1d50717577:41775 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T14:59:21,350 INFO [RS:2;db1d50717577:45255 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T14:59:21,350 INFO [regionserver/db1d50717577:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T14:59:21,350 INFO [RS:2;db1d50717577:45255 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45255 2024-12-10T14:59:21,351 INFO [RS:0;db1d50717577:41775 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41775 2024-12-10T14:59:21,351 INFO [regionserver/db1d50717577:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T14:59:21,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39483 is added to blk_1073741834_1010 (size=10373) 2024-12-10T14:59:21,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34057 is added to blk_1073741834_1010 (size=10373) 2024-12-10T14:59:21,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39383 is added to blk_1073741834_1010 (size=10373) 2024-12-10T14:59:21,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T14:59:21,356 DEBUG [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/oldWALs 2024-12-10T14:59:21,356 INFO [RS:1;db1d50717577:37707 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C37707%2C1733842502956:(num 1733842505252) 2024-12-10T14:59:21,356 DEBUG [RS:1;db1d50717577:37707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db1d50717577,45255,1733842503015 2024-12-10T14:59:21,356 INFO [RS:1;db1d50717577:37707 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:59:21,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/rs/db1d50717577,41775,1733842502887 2024-12-10T14:59:21,357 INFO [RS:1;db1d50717577:37707 {}] hbase.ChoreService(370): Chore service for: regionserver/db1d50717577:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-10T14:59:21,357 INFO [regionserver/db1d50717577:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T14:59:21,357 INFO [RS:1;db1d50717577:37707 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37707 2024-12-10T14:59:21,359 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db1d50717577,45255,1733842503015] 2024-12-10T14:59:21,359 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db1d50717577,45255,1733842503015; numProcessing=1 2024-12-10T14:59:21,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T14:59:21,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db1d50717577,37707,1733842502956 2024-12-10T14:59:21,362 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db1d50717577,45255,1733842503015 already deleted, retry=false 2024-12-10T14:59:21,362 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db1d50717577,45255,1733842503015 expired; onlineServers=2 2024-12-10T14:59:21,362 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db1d50717577,41775,1733842502887] 2024-12-10T14:59:21,362 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db1d50717577,41775,1733842502887; numProcessing=2 2024-12-10T14:59:21,364 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db1d50717577,41775,1733842502887 already deleted, retry=false 2024-12-10T14:59:21,364 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db1d50717577,41775,1733842502887 expired; onlineServers=1 2024-12-10T14:59:21,365 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db1d50717577,37707,1733842502956] 2024-12-10T14:59:21,365 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db1d50717577,37707,1733842502956; numProcessing=3 2024-12-10T14:59:21,367 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db1d50717577,37707,1733842502956 already deleted, retry=false 2024-12-10T14:59:21,367 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db1d50717577,37707,1733842502956 expired; onlineServers=0 2024-12-10T14:59:21,367 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,36533,1733842502047' ***** 2024-12-10T14:59:21,367 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T14:59:21,368 DEBUG [M:0;db1d50717577:36533 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@259fc668, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:59:21,368 INFO [M:0;db1d50717577:36533 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,36533,1733842502047 2024-12-10T14:59:21,368 INFO [M:0;db1d50717577:36533 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,36533,1733842502047; all regions closed. 2024-12-10T14:59:21,368 DEBUG [M:0;db1d50717577:36533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:59:21,368 DEBUG [M:0;db1d50717577:36533 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T14:59:21,368 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-10T14:59:21,368 DEBUG [M:0;db1d50717577:36533 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T14:59:21,368 DEBUG [master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733842504845 {}] cleaner.HFileCleaner(306): Exit Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733842504845,5,FailOnTimeoutGroup] 2024-12-10T14:59:21,368 DEBUG [master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733842504835 {}] cleaner.HFileCleaner(306): Exit Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733842504835,5,FailOnTimeoutGroup] 2024-12-10T14:59:21,368 INFO [M:0;db1d50717577:36533 {}] hbase.ChoreService(370): Chore service for: master/db1d50717577:0 had [] on shutdown 2024-12-10T14:59:21,368 DEBUG [M:0;db1d50717577:36533 {}] master.HMaster(1733): Stopping service threads 2024-12-10T14:59:21,368 INFO [M:0;db1d50717577:36533 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T14:59:21,369 INFO [M:0;db1d50717577:36533 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T14:59:21,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T14:59:21,369 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-10T14:59:21,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T14:59:21,370 DEBUG [M:0;db1d50717577:36533 {}] zookeeper.ZKUtil(347): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-10T14:59:21,370 WARN [M:0;db1d50717577:36533 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-10T14:59:21,370 INFO [M:0;db1d50717577:36533 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-10T14:59:21,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T14:59:21,370 INFO [M:0;db1d50717577:36533 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-10T14:59:21,370 DEBUG [M:0;db1d50717577:36533 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-10T14:59:21,384 INFO [M:0;db1d50717577:36533 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:59:21,384 DEBUG [M:0;db1d50717577:36533 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:59:21,384 DEBUG [M:0;db1d50717577:36533 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-10T14:59:21,384 DEBUG [M:0;db1d50717577:36533 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:59:21,384 INFO [M:0;db1d50717577:36533 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.44 KB heapSize=966.77 KB
2024-12-10T14:59:21,384 ERROR [AsyncFSWAL-0-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:59:21,459 INFO [RS:2;db1d50717577:45255 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,45255,1733842503015; zookeeper connection closed.
2024-12-10T14:59:21,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T14:59:21,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T14:59:21,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45255-0x1019cdee8310003, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T14:59:21,459 INFO [RS:0;db1d50717577:41775 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,41775,1733842502887; zookeeper connection closed.
2024-12-10T14:59:21,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41775-0x1019cdee8310001, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T14:59:21,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34b55503 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34b55503
2024-12-10T14:59:21,460 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7fd5f139 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7fd5f139
2024-12-10T14:59:21,463 INFO [RS:1;db1d50717577:37707 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,37707,1733842502956; zookeeper connection closed.
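[Editor's note] The NullPointerException logged above (FanOutOneBlockAsyncDFSOutput.buffered) indicates the WAL writer's output buffer had already been released, i.e. set to null on close, when a late append from the AsyncFSWAL consumer queried it. The following is a minimal, hypothetical sketch of that failure mode and a null-tolerant accessor; it is not the actual HBase code, and java.nio.ByteBuffer stands in for the Netty ByteBuf seen in the trace.

```java
import java.nio.ByteBuffer;

/**
 * Hypothetical sketch of a null-tolerant buffered() accessor, illustrating the
 * failure mode in the stack trace above: the buffer is released on close, and a
 * late caller then dereferences null. ByteBuffer stands in for Netty's ByteBuf.
 */
public class NullSafeOutput {
    private volatile ByteBuffer buf = ByteBuffer.allocate(4096);

    /** Bytes waiting to be flushed; reports 0 once the output has been closed. */
    public int buffered() {
        ByteBuffer b = buf;               // read the volatile field once
        return b == null ? 0 : b.position();
    }

    /** Releasing the buffer on close is what makes an unguarded accessor throw. */
    public void close() {
        buf = null;
    }

    public static void main(String[] args) {
        NullSafeOutput out = new NullSafeOutput();
        System.out.println("buffered before close: " + out.buffered()); // 0
        out.close();
        System.out.println("buffered after close:  " + out.buffered()); // still 0, no NPE
    }
}
```

An unguarded version (return buf.position() without the null check) reproduces exactly the "because \"this.buf\" is null" error recorded above once close() has cleared the field.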
2024-12-10T14:59:21,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:59:21,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37707-0x1019cdee8310002, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:59:21,464 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@58f1a648 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@58f1a648 2024-12-10T14:59:21,464 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T14:59:22,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T14:59:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-10T14:59:22,593 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-10T14:59:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-10T14:59:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-10T14:59:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T14:59:22,594 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-10T14:59:26,671 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-10T14:59:30,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:00:00,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
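[Editor's note] The recurring FsDatasetAsyncDiskServiceFixer DEBUG lines above come from a reflective probe for a private "threadGroup" field that newer Hadoop releases no longer declare (see HBASE-27595). The sketch below is a minimal, hypothetical illustration of that probe-and-back-off pattern; the Legacy class and field names are stand-ins, not Hadoop types.

```java
import java.lang.reflect.Field;
import java.util.Optional;

/**
 * Hypothetical sketch of a reflective field probe that degrades gracefully:
 * look up a private field by name and quietly skip the fix-up when the
 * running version no longer has that field.
 */
public class FieldProbe {

    /** Stand-in for an older class that still carries the private field. */
    static class Legacy {
        private Object threadGroup = new Object();
    }

    /** Returns the accessible field, or empty when the field does not exist. */
    static Optional<Field> find(Class<?> target, String name) {
        try {
            Field f = target.getDeclaredField(name);
            f.setAccessible(true);
            return Optional.of(f);
        } catch (NoSuchFieldException e) {
            // Mirrors the tone of the DEBUG line in the log: note it and move on.
            System.out.println("NoSuchFieldException: " + name + "; skipping fixer");
            return Optional.empty();
        }
    }

    public static void main(String[] args) {
        System.out.println(find(Legacy.class, "threadGroup").isPresent());      // true
        System.out.println(find(Legacy.class, "asyncDiskService").isPresent()); // false, logs and continues
    }
}
```

Logging at DEBUG and continuing, as the test utility does here, keeps the warning harmless on Hadoop versions where the field has been removed.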
2024-12-10T15:00:03,110 DEBUG [master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-10T15:00:03,113 DEBUG [master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-10T15:00:11,300 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@56120a08 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 1 Waited count: 18 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 1 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2603339a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3257 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.CountDownLatch$Sync@15036f65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12270 Waited count: 12756 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 7 Waited count: 8 Waiting on java.lang.ref.ReferenceQueue$Lock@12c786c4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2d67e8e5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3074 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e61be0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36449): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 108 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 
Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 31856 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d762131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36449): State: TIMED_WAITING Blocked count: 59 Waited count: 2102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36449): State: TIMED_WAITING Blocked count: 64 Waited count: 2098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36449): State: TIMED_WAITING Blocked count: 45 Waited count: 2084 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36449): State: TIMED_WAITING Blocked count: 51 Waited count: 2088 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36449): State: TIMED_WAITING Blocked count: 60 Waited count: 2093 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39217): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187d410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1240 Waited count: 1340 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 335 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp2042583043-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp2042583043-120-acceptor-0@20ec9920-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2042583043-122): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (1891196410) connection to localhost/127.0.0.1:36449 from jenkins): State: TIMED_WAITING Blocked count: 1268 Waited count: 1269 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:36449): State: TIMED_WAITING 
Blocked count: 0 Waited count: 1888 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36315): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7335d5e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1268 Waited count: 1348 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 326 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default 
port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 326 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 154 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1446323025-155): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156-acceptor-0@2a0349e5-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446323025-157): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 187 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 40961): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@15769732[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@26615c1e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 263 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38e91d19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1223 Waited count: 1339 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 191 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@43dbe75e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57176): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 23 Waited count: 729 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@352a9683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57176):): State: WAITING Blocked count: 0 Waited count: 816 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@339b0c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 851 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b5b0e9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bee4ae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 280 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57176)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7996f30f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 41 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16fdd8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6010d543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 215 Waited count: 780 Waiting on java.util.concurrent.Semaphore$NonfairSync@5a386469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 86 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@14f8faaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533): State: WAITING Blocked count: 84 Waited count: 4933 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c38287b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1366747b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4098f117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6dce628f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4591da96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;db1d50717577:36533): State: TIMED_WAITING Blocked count: 6 Waited count: 2588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$956/0x00007fe0a4f18000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@56b48050): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3174 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 400 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 31660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aace79a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f58a5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@309b02bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57da061c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 31461 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 593 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 594 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 366 Waiting on java.util.concurrent.ForkJoinPool@30dad6f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 607 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 621 (region-location-3): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1022 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 366 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1122 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 65 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fd1bb4c Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1536 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@58ab2617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1919 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3269 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4787 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 175 Waiting on java.util.concurrent.ForkJoinPool@30dad6f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4959 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4960 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4961 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8827 (AsyncFSWAL-1-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e203680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8831 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-10T15:00:30,925 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-10T15:01:00,926 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533
220 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@56120a08 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
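The dump header above ("Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533") is printed by the test harness itself: the "Time-limited test" thread later in this dump is sitting inside sun.management.ThreadImpl.getThreadInfo, reached via org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo and Threads.threadDumpingIsAlive while LocalHBaseCluster.join waits for the master to shut down. As a rough, illustrative sketch only (not HBase's actual printThreadInfo implementation; the class name PeriodicThreadDumper and the hard-coded 60-second interval are assumptions for the example), the same kind of snapshot can be produced with the standard java.lang.management API:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class PeriodicThreadDumper {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        while (true) {
            System.out.println("Process Thread Dump: illustrative snapshot");
            // Integer.MAX_VALUE requests the full stack of every live thread.
            ThreadInfo[] infos = mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE);
            for (ThreadInfo info : infos) {
                if (info == null) continue; // the thread exited between the two calls
                System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
            Thread.sleep(60_000L); // matches the 60-second cadence announced in the header above
        }
    }
}

ThreadInfo's blocked and waited counts are cumulative over the lifetime of each thread, which is why those numbers only grow from one 60-second snapshot to the next in this log.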
Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 1 Waited count: 21 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 1 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2603339a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3857 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.CountDownLatch$Sync@519d1c3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12270 Waited count: 12757 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 7 Waited count: 8 Waiting on 
java.lang.ref.ReferenceQueue$Lock@12c786c4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2d67e8e5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 766 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3074 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e61be0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 
(Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36449): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 37821 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d762131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36449): State: TIMED_WAITING Blocked count: 59 Waited count: 2163 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36449): State: TIMED_WAITING Blocked count: 64 Waited count: 2160 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36449): State: TIMED_WAITING Blocked count: 45 Waited count: 2145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36449): State: TIMED_WAITING Blocked count: 51 Waited count: 2150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36449): State: TIMED_WAITING Blocked count: 60 Waited count: 2154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 764 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39217): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 272 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187d410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1260 Waited count: 1380 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp2042583043-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp2042583043-120-acceptor-0@20ec9920-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2042583043-122): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (1891196410) connection to localhost/127.0.0.1:36449 from jenkins): State: TIMED_WAITING Blocked count: 1328 Waited count: 1329 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 0 Waited count: 1948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 763 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36315): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
136 (Command processor): State: WAITING Blocked count: 0 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7335d5e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1288 Waited count: 1388 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 382 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 154 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1446323025-155): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156-acceptor-0@2a0349e5-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446323025-157): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 762 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 40961): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 
(Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@15769732[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@26615c1e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38e91d19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1243 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 191 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@43dbe75e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57176): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 23 Waited count: 734 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@352a9683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57176):): State: WAITING Blocked count: 0 Waited count: 821 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@339b0c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 856 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b5b0e9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bee4ae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 310 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57176)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7996f30f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 41 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16fdd8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6010d543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 215 Waited count: 780 Waiting on java.util.concurrent.Semaphore$NonfairSync@5a386469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 86 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@14f8faaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533): State: WAITING Blocked count: 84 Waited count: 4933 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c38287b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1366747b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4098f117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6dce628f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4591da96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;db1d50717577:36533): State: TIMED_WAITING Blocked count: 6 Waited count: 2588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$956/0x00007fe0a4f18000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@56b48050): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3774 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 400 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@372912a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37663 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aace79a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f58a5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@309b02bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57da061c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37464 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 366 Waiting on java.util.concurrent.ForkJoinPool@30dad6f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 607 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 
(region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 621 (region-location-3): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1022 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1122 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 65 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fd1bb4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1536 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@58ab2617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1919 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3269 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4787 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4959 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4960 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4961 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8827 (AsyncFSWAL-1-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e203680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8831 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-10T15:01:30,926 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:02:00,926 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
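The two FsDatasetAsyncDiskServiceFixer DEBUG lines directly above come from HBase's test utility probing a Hadoop field by reflection: NoSuchFieldException can only be raised by a reflective lookup, and the message itself says the field (threadGroup) disappears on Hadoop releases newer than 3.2.3 / 3.3.4 (see HBASE-27595). The snippet below is a minimal, hypothetical sketch of that pattern, assuming only what the log message states (the field name and the exception); it is not HBase's actual implementation, and the class and method names here are illustrative.

```java
import java.lang.reflect.Field;

// Hypothetical reconstruction of the reflective probe behind the
// "NoSuchFieldException: threadGroup" DEBUG lines above; not HBase's real code.
public final class ThreadGroupProbe {
    // 'target' stands in for the Hadoop object whose 'threadGroup' field is being looked up.
    static void probe(Object target) {
        try {
            // Field name taken from the log message; newer Hadoop versions no longer declare it.
            Field f = target.getClass().getDeclaredField("threadGroup");
            f.setAccessible(true);
            // ... a real fixer would work with the thread group here ...
        } catch (NoSuchFieldException e) {
            // Mirrors the DEBUG line above: the lookup failure is only logged, not fatal.
            System.out.println("NoSuchFieldException: threadGroup; it might be because your"
                    + " Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details.");
        }
    }

    public static void main(String[] args) {
        probe(new Object()); // any object without a 'threadGroup' field takes the catch branch
    }
}
```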
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 45 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@56120a08 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 1 Waited count: 24 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 1 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2603339a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 45 Waiting on java.util.concurrent.CountDownLatch$Sync@50070384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12270 Waited count: 12758 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@12c786c4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2d67e8e5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 886 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3074 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e61be0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36449): State: TIMED_WAITING Blocked count: 1 Waited 
count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 43786 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d762131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36449): State: TIMED_WAITING Blocked count: 59 Waited count: 2224 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36449): State: TIMED_WAITING Blocked count: 64 Waited count: 2221 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36449): State: TIMED_WAITING Blocked count: 45 Waited count: 2207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36449): State: TIMED_WAITING Blocked count: 51 Waited count: 2211 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36449): State: TIMED_WAITING Blocked count: 60 Waited count: 2216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 884 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39217): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 293 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187d410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1281 Waited count: 1422 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 476 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp2042583043-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp2042583043-120-acceptor-0@20ec9920-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2042583043-122): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (1891196410) connection to localhost/127.0.0.1:36449 from jenkins): State: TIMED_WAITING Blocked count: 1390 Waited count: 1391 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 0 Waited count: 2010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 883 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36315): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7335d5e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1309 Waited count: 1430 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 442 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 154 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1446323025-155): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156-acceptor-0@2a0349e5-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446323025-157): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 882 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 40961): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@15769732[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@26615c1e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 303 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38e91d19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1263 Waited count: 1419 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 191 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@43dbe75e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57176): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 221 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 23 Waited count: 738 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@352a9683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57176):): State: WAITING Blocked count: 0 Waited count: 825 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@339b0c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 860 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b5b0e9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bee4ae Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57176)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 
(Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7996f30f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 41 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16fdd8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6010d543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 215 Waited count: 780 Waiting on java.util.concurrent.Semaphore$NonfairSync@5a386469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 86 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@14f8faaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533): State: WAITING Blocked count: 84 Waited count: 4933 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c38287b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1366747b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4098f117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6dce628f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4591da96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;db1d50717577:36533): State: TIMED_WAITING Blocked count: 6 Waited count: 2588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$956/0x00007fe0a4f18000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@56b48050): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4374 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 400 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@372912a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43664 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aace79a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f58a5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@309b02bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57da061c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43466 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 367 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 607 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 621 (region-location-3): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1022 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1122 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 65 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fd1bb4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1536 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@58ab2617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1919 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3269 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4959 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4960 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4961 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8827 
(AsyncFSWAL-1-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e203680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8831 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-10T15:02:30,926 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:03:00,926 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533 218 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 45 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@56120a08 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 1 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 1 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2603339a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@6f2cbb32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12270 Waited count: 12759 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@12c786c4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2d67e8e5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1006 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3074 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e61be0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36449): State: TIMED_WAITING Blocked count: 1 Waited 
count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 49751 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d762131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36449): State: TIMED_WAITING Blocked count: 59 Waited count: 2286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36449): State: TIMED_WAITING Blocked count: 64 Waited count: 2282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36449): State: TIMED_WAITING Blocked count: 45 Waited count: 2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36449): State: TIMED_WAITING Blocked count: 51 Waited count: 2272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36449): State: TIMED_WAITING Blocked count: 60 Waited count: 2277 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1004 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39217): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187d410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1301 Waited count: 1462 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp2042583043-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp2042583043-120-acceptor-0@20ec9920-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2042583043-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (1891196410) connection to localhost/127.0.0.1:36449 from jenkins): State: TIMED_WAITING Blocked count: 1450 Waited count: 1451 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 0 Waited count: 2070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 1003 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36315): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7335d5e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1329 Waited count: 1470 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 154 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1446323025-155): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156-acceptor-0@2a0349e5-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446323025-157): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 1002 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 40961): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@15769732[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@26615c1e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38e91d19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1283 Waited count: 1459 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 191 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@43dbe75e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57176): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 251 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 23 Waited count: 742 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@352a9683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57176):): State: WAITING Blocked count: 0 Waited count: 829 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@339b0c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 864 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b5b0e9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bee4ae Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 366 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57176)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 
(Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7996f30f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 41 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16fdd8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6010d543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 215 Waited count: 780 Waiting on java.util.concurrent.Semaphore$NonfairSync@5a386469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 86 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@14f8faaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533): State: WAITING Blocked count: 84 Waited count: 4933 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c38287b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1366747b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4098f117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6dce628f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4591da96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;db1d50717577:36533): State: TIMED_WAITING Blocked count: 6 Waited count: 2588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$956/0x00007fe0a4f18000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@56b48050): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4973 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 400 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@372912a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49666 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aace79a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f58a5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@309b02bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57da061c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49467 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 621 (region-location-3): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1022 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1122 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 65 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fd1bb4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1536 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@58ab2617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1919 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3269 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4959 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4960 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4961 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8827 (AsyncFSWAL-1-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e203680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8831 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-10T15:03:30,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:04:00,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:04:21,385 DEBUG [M:0;db1d50717577:36533 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T15:04:21,385 WARN [M:0;db1d50717577:36533 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?] ... 20 more 2024-12-10T15:04:21,386 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:04:21,388 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-10T15:04:21,388 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-10T15:04:21,388 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 2024-12-10T15:04:21,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:04:21,388 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:04:21,389 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 2024-12-10T15:04:21,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;db1d50717577:36533 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 45 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@56120a08 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 1 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 1 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2603339a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@4a2b50d2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12270 Waited count: 12760 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@12c786c4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2d67e8e5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:45609}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3074 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e61be0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36449): State: TIMED_WAITING Blocked count: 1 Waited 
count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55715 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d762131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36449): State: TIMED_WAITING Blocked count: 59 Waited count: 2347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36449): State: TIMED_WAITING Blocked count: 64 Waited count: 2343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36449): State: TIMED_WAITING Blocked count: 45 Waited count: 2329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36449): State: TIMED_WAITING Blocked count: 51 Waited count: 2333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36449): State: TIMED_WAITING Blocked count: 60 Waited count: 2338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:44557}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 39217): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187d410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1321 Waited count: 1502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 39217): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp2042583043-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp2042583043-120-acceptor-0@20ec9920-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:37783}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2042583043-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (1891196410) connection to localhost/127.0.0.1:36449 from jenkins): State: TIMED_WAITING Blocked count: 1510 Waited count: 1511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 0 Waited count: 2130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 1123 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36315): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 377 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7335d5e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1349 Waited count: 1510 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 562 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36315): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 154 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1446323025-155): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe0a4428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156-acceptor-0@2a0349e5-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:41005}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1446323025-157): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 1122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 40961): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@15769732[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@26615c1e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38e91d19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449): State: TIMED_WAITING Blocked count: 1303 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 191 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 562 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 40961): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@43dbe75e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57176): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 281 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 23 Waited count: 747 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@352a9683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:57176):): State: WAITING Blocked count: 0 Waited count: 834 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@339b0c82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 869 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b5b0e9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bee4ae Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:57176)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 
(Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7996f30f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 41 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16fdd8a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60c1f8bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6010d543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 215 Waited count: 780 Waiting on java.util.concurrent.Semaphore$NonfairSync@5a386469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 86 Waited count: 355 Waiting on java.util.concurrent.Semaphore$NonfairSync@14f8faaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36533): State: WAITING Blocked count: 84 Waited count: 4933 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c38287b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a644272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1366747b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4098f117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6dce628f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36533): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4591da96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;db1d50717577:36533): State: TIMED_WAITING Blocked count: 6 Waited count: 2589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/db1d50717577:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@56b48050): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5573 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 400 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@372912a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55667 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aace79a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f58a5f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 20 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@309b02bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/db1d50717577:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57da061c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55468 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 621 (region-location-3): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1022 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1122 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 65 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fd1bb4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1536 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@58ab2617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1919 
(RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3269 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa7bb4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4959 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4960 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4961 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8827 (AsyncFSWAL-1-hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData-prefix:db1d50717577,36533,1733842502047): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e203680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8831 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8832 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8836 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8837 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1120/0x00007fe0a515ecc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:04:25,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on 
file=hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 after 4000ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:04:26,386 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-10T15:04:26,386 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T15:04:26,386 INFO [M:0;db1d50717577:36533 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-10T15:04:26,387 INFO [M:0;db1d50717577:36533 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36533 2024-12-10T15:04:26,388 DEBUG [M:0;db1d50717577:36533 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/db1d50717577,36533,1733842502047 already deleted, retry=false 2024-12-10T15:04:26,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36449/user/jenkins/test-data/391655db-c52e-76a2-cc95-6023d82989ef/MasterData/WALs/db1d50717577,36533,1733842502047/db1d50717577%2C36533%2C1733842502047.1733842503951 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-10T15:04:26,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T15:04:26,490 INFO [M:0;db1d50717577:36533 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,36533,1733842502047; zookeeper connection closed. 
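[Editor's note] The AsyncFSWAL shutdown error above explicitly points at the configuration key "hbase.wal.async.wait.on.shutdown.seconds" as the knob for giving the async writer more time to close. Below is a minimal, hypothetical Java sketch of raising that value on an HBase configuration before a run; the key is taken verbatim from the log message, while the 30-second value, the class name, and the idea of setting it programmatically in test setup are assumptions, not part of the original run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: raises the async-WAL shutdown wait referenced in the log above.
// The 30-second value is an assumption; the log shows the run waited 5 seconds.
public class WalShutdownWaitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Give the async WAL writer longer to finish closing before shutdown gives up.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    System.out.println("hbase.wal.async.wait.on.shutdown.seconds = "
        + conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5));
  }
}

Such a configuration would need to be the one actually handed to the cluster/WAL under test to have any effect; it does not address the underlying "Filesystem closed" failures seen in the lease-recovery attempts above.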
2024-12-10T15:04:26,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36533-0x1019cdee8310000, quorum=127.0.0.1:57176, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T15:04:26,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bdef31c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T15:04:26,493 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:04:26,493 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:04:26,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f4d5ab4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:04:26,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e1b48b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED}
2024-12-10T15:04:26,495 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T15:04:26,495 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T15:04:26,495 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T15:04:26,495 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-279726562-172.17.0.2-1733842498143 (Datanode Uuid a7638ed4-3cb9-4cf4-ba7d-e2f79ee7a042) service to localhost/127.0.0.1:36449
2024-12-10T15:04:26,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data5/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data6/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,497 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T15:04:26,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14090edc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T15:04:26,499 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:04:26,499 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:04:26,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a110049{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:04:26,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45f72ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED}
2024-12-10T15:04:26,501 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T15:04:26,501 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T15:04:26,501 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-279726562-172.17.0.2-1733842498143 (Datanode Uuid fd620eca-af69-4116-9d3b-064c4e513dae) service to localhost/127.0.0.1:36449
2024-12-10T15:04:26,501 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T15:04:26,501 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data3/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,501 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data4/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,501 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T15:04:26,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a633356{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T15:04:26,503 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:04:26,503 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:04:26,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5648e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:04:26,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7622634b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED}
2024-12-10T15:04:26,505 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T15:04:26,505 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T15:04:26,505 WARN [BP-279726562-172.17.0.2-1733842498143 heartbeating to localhost/127.0.0.1:36449 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-279726562-172.17.0.2-1733842498143 (Datanode Uuid bd32721d-283c-428d-b13c-f1f8357b234b) service to localhost/127.0.0.1:36449
2024-12-10T15:04:26,505 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T15:04:26,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data1/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,506 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/cluster_c6d1fde1-68a7-4ee8-4e68-d0da0b5d10d8/dfs/data/data2/current/BP-279726562-172.17.0.2-1733842498143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:04:26,506 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T15:04:26,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7883a2cb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T15:04:26,512 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:04:26,513 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:04:26,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:04:26,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/872c4b61-fed8-8ead-cf36-f6523bf622c0/hadoop.log.dir/,STOPPED}
2024-12-10T15:04:26,525 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-10T15:04:26,759 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down