2024-12-08 04:26:16,191 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6
2024-12-08 04:26:16,225 main DEBUG Took 0.030074 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-08 04:26:16,225 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-08 04:26:16,226 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-08 04:26:16,227 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-08 04:26:16,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,237 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-08 04:26:16,268 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,270 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,271 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,272 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,272 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,273 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,274 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,274 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,275 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,276 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,276 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,277 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,277 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,278 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,279 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,279 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,280 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,281 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,282 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,282 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,283 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,284 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-08 04:26:16,284 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,285 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-08 04:26:16,287 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-08 04:26:16,288 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-08 04:26:16,292 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-08 04:26:16,293 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-08 04:26:16,294 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-08 04:26:16,294 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-08 04:26:16,310 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-08 04:26:16,314 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-08 04:26:16,316 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-08 04:26:16,316 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-08 04:26:16,317 main DEBUG createAppenders(={Console})
2024-12-08 04:26:16,318 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 initialized
2024-12-08 04:26:16,318 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6
2024-12-08 04:26:16,318 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 OK.
2024-12-08 04:26:16,319 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-08 04:26:16,320 main DEBUG OutputStream closed
2024-12-08 04:26:16,320 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-08 04:26:16,320 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-08 04:26:16,321 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5b03b9fe OK
2024-12-08 04:26:16,437 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-08 04:26:16,439 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-08 04:26:16,441 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-08 04:26:16,442 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-08 04:26:16,443 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-08 04:26:16,443 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-08 04:26:16,443 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-08 04:26:16,444 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-08 04:26:16,444 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-08 04:26:16,445 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-08 04:26:16,445 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-08 04:26:16,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-08 04:26:16,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-08 04:26:16,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-08 04:26:16,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-08 04:26:16,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-08 04:26:16,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-08 04:26:16,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-08 04:26:16,456 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-08 04:26:16,456 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@b2c5e07) with optional ClassLoader: null
2024-12-08 04:26:16,456 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-08 04:26:16,457 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@b2c5e07] started OK.
2024-12-08T04:26:16,477 INFO  [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-08 04:26:16,488 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-08 04:26:16,488 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-08T04:26:17,044 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3
2024-12-08T04:26:17,045 INFO  [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-12-08T04:26:17,110 WARN  [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-08T04:26:17,445 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-08T04:26:17,446 INFO  [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283, deleteOnExit=true
2024-12-08T04:26:17,446 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-08T04:26:17,447 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/test.cache.data in system properties and HBase conf
2024-12-08T04:26:17,448 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.tmp.dir in system properties and HBase conf
2024-12-08T04:26:17,449 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir in system properties and HBase conf
2024-12-08T04:26:17,449 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-08T04:26:17,450 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-08T04:26:17,451 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-08T04:26:17,541 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-08T04:26:17,545 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-08T04:26:17,546 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-08T04:26:17,547 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-08T04:26:17,547 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T04:26:17,548 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-08T04:26:17,549 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-08T04:26:17,549 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T04:26:17,550 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T04:26:17,551 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-08T04:26:17,551 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/nfs.dump.dir in system properties and HBase conf
2024-12-08T04:26:17,552 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir in system properties and HBase conf
2024-12-08T04:26:17,552 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T04:26:17,553 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-08T04:26:17,554 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-08T04:26:18,713 WARN  [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-08T04:26:18,847 INFO  [Time-limited test {}] log.Log(170): Logging initialized @4165ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-08T04:26:18,965 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:19,103 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:19,153 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:19,154 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:19,156 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-08T04:26:19,185 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:19,195 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:19,197 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:19,511 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7883a2cb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-40303-hadoop-hdfs-3_4_1-tests_jar-_-any-3680058089421578265/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-08T04:26:19,529 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}
2024-12-08T04:26:19,530 INFO  [Time-limited test {}] server.Server(415): Started @4849ms
2024-12-08T04:26:20,054 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:20,064 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:20,068 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:20,068 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:20,068 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T04:26:20,069 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31a0decf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:20,070 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7622634b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:20,233 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@420d534c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-45189-hadoop-hdfs-3_4_1-tests_jar-_-any-2207998206327847298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:26:20,233 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}
2024-12-08T04:26:20,234 INFO  [Time-limited test {}] server.Server(415): Started @5553ms
2024-12-08T04:26:20,311 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-08T04:26:20,545 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:20,552 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:20,580 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:20,581 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:20,581 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-08T04:26:20,584 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ca832e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:20,585 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45f72ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:20,759 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a29fbf5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-42377-hadoop-hdfs-3_4_1-tests_jar-_-any-2174061233194809460/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:26:20,760 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}
2024-12-08T04:26:20,761 INFO  [Time-limited test {}] server.Server(415): Started @6080ms
2024-12-08T04:26:20,764 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-08T04:26:21,026 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:21,032 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:21,041 WARN  [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,041 WARN  [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,053 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:21,053 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:21,053 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-08T04:26:21,057 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e5b9a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:21,058 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e1b48b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:21,123 WARN  [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,136 WARN  [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,274 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3003ef5d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-40445-hadoop-hdfs-3_4_1-tests_jar-_-any-303121784710798173/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:26:21,276 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}
2024-12-08T04:26:21,276 INFO  [Time-limited test {}] server.Server(415): Started @6596ms
2024-12-08T04:26:21,283 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-08T04:26:21,295 WARN  [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-08T04:26:21,313 WARN  [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-08T04:26:21,452 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc0b87f1875b0eb5 with lease ID 0x9baf96daddd5ed0c: Processing first storage report for DS-698c8590-153b-40b0-aced-31fb41c17a5e from datanode DatanodeRegistration(127.0.0.1:38289, datanodeUuid=5630b6cd-499b-4a4d-a30a-5d4649b5feb4, infoPort=45371, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,454 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc0b87f1875b0eb5 with lease ID 0x9baf96daddd5ed0c: from storage DS-698c8590-153b-40b0-aced-31fb41c17a5e node DatanodeRegistration(127.0.0.1:38289, datanodeUuid=5630b6cd-499b-4a4d-a30a-5d4649b5feb4, infoPort=45371, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-08T04:26:21,455 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa97f2706ab057955 with lease ID 0x9baf96daddd5ed0d: Processing first storage report for DS-d544c124-2689-4f05-b21d-06431c2f1c66 from datanode DatanodeRegistration(127.0.0.1:46763, datanodeUuid=02ad592b-645e-4d06-8605-8493f90ebc27, infoPort=41887, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,455 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa97f2706ab057955 with lease ID 0x9baf96daddd5ed0d: from storage DS-d544c124-2689-4f05-b21d-06431c2f1c66 node DatanodeRegistration(127.0.0.1:46763, datanodeUuid=02ad592b-645e-4d06-8605-8493f90ebc27, infoPort=41887, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T04:26:21,456 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc0b87f1875b0eb5 with lease ID 0x9baf96daddd5ed0c: Processing first storage report for DS-63b36efc-7d93-42d1-87f3-130f15d6c5f6 from datanode DatanodeRegistration(127.0.0.1:38289, datanodeUuid=5630b6cd-499b-4a4d-a30a-5d4649b5feb4, infoPort=45371, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,456 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc0b87f1875b0eb5 with lease ID 0x9baf96daddd5ed0c: from storage DS-63b36efc-7d93-42d1-87f3-130f15d6c5f6 node DatanodeRegistration(127.0.0.1:38289, datanodeUuid=5630b6cd-499b-4a4d-a30a-5d4649b5feb4, infoPort=45371, infoSecurePort=0, ipcPort=44671, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-08T04:26:21,456 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa97f2706ab057955 with lease ID 0x9baf96daddd5ed0d: Processing first storage report for DS-e11641bd-c3cd-489e-be1c-220288414b75 from datanode DatanodeRegistration(127.0.0.1:46763, datanodeUuid=02ad592b-645e-4d06-8605-8493f90ebc27, infoPort=41887, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,457 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa97f2706ab057955 with lease ID 0x9baf96daddd5ed0d: from storage DS-e11641bd-c3cd-489e-be1c-220288414b75 node DatanodeRegistration(127.0.0.1:46763, datanodeUuid=02ad592b-645e-4d06-8605-8493f90ebc27, infoPort=41887, infoSecurePort=0, ipcPort=37189, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T04:26:21,643 WARN  [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,660 WARN  [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419/current, will proceed with Du for space computation calculation, 
2024-12-08T04:26:21,726 WARN  [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-08T04:26:21,738 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a974e16c0bea586 with lease ID 0x9baf96daddd5ed0e: Processing first storage report for DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d from datanode DatanodeRegistration(127.0.0.1:36937, datanodeUuid=7b7eb7da-b69b-41a3-a625-fd862532a332, infoPort=41083, infoSecurePort=0, ipcPort=34801, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,738 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a974e16c0bea586 with lease ID 0x9baf96daddd5ed0e: from storage DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d node DatanodeRegistration(127.0.0.1:36937, datanodeUuid=7b7eb7da-b69b-41a3-a625-fd862532a332, infoPort=41083, infoSecurePort=0, ipcPort=34801, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-08T04:26:21,738 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a974e16c0bea586 with lease ID 0x9baf96daddd5ed0e: Processing first storage report for DS-f0a85883-6dee-4757-a33a-d05c7e496e66 from datanode DatanodeRegistration(127.0.0.1:36937, datanodeUuid=7b7eb7da-b69b-41a3-a625-fd862532a332, infoPort=41083, infoSecurePort=0, ipcPort=34801, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419)
2024-12-08T04:26:21,739 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a974e16c0bea586 with lease ID 0x9baf96daddd5ed0e: from storage DS-f0a85883-6dee-4757-a33a-d05c7e496e66 node DatanodeRegistration(127.0.0.1:36937, datanodeUuid=7b7eb7da-b69b-41a3-a625-fd862532a332, infoPort=41083, infoSecurePort=0, ipcPort=34801, storageInfo=lv=-57;cid=testClusterID;nsid=1963332720;c=1733631978419), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-08T04:26:22,002 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3
2024-12-08T04:26:22,110 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/zookeeper_0, clientPort=55878, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/zookeeper_0/version-2, dataDirSize=457
dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/zookeeper_0/version-2, dataLogSize=457
tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0

2024-12-08T04:26:22,129 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55878
2024-12-08T04:26:22,144 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:22,147 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:22,473 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741825_1001 (size=7)
2024-12-08T04:26:22,474 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741825_1001 (size=7)
2024-12-08T04:26:22,474 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741825_1001 (size=7)
2024-12-08T04:26:22,887 INFO  [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720 with version=8
2024-12-08T04:26:22,888 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/hbase-staging
2024-12-08T04:26:23,079 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-08T04:26:23,371 INFO  [Time-limited test {}] client.ConnectionUtils(129): master/428ded7e54d6:0 server-side Connection retries=45
2024-12-08T04:26:23,391 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,391 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,392 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T04:26:23,392 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,392 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T04:26:23,554 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-08T04:26:23,637 INFO  [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-08T04:26:23,649 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-08T04:26:23,655 INFO  [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T04:26:23,694 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 13589 (auto-detected)
2024-12-08T04:26:23,696 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-08T04:26:23,742 INFO  [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46337
2024-12-08T04:26:23,755 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:23,760 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:23,778 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46337 connecting to ZooKeeper ensemble=127.0.0.1:55878
2024-12-08T04:26:23,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463370x0, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T04:26:23,826 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46337-0x1006fe072e80000 connected
2024-12-08T04:26:23,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T04:26:23,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:26:23,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T04:26:23,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46337
2024-12-08T04:26:23,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46337
2024-12-08T04:26:23,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46337
2024-12-08T04:26:23,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46337
2024-12-08T04:26:23,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46337
2024-12-08T04:26:23,924 INFO  [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720, hbase.cluster.distributed=false
2024-12-08T04:26:23,995 INFO  [Time-limited test {}] client.ConnectionUtils(129): regionserver/428ded7e54d6:0 server-side Connection retries=45
2024-12-08T04:26:23,995 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,996 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,996 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T04:26:23,996 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:23,996 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T04:26:23,999 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-08T04:26:24,004 INFO  [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T04:26:24,006 INFO  [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45955
2024-12-08T04:26:24,008 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-08T04:26:24,035 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-08T04:26:24,037 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,040 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,045 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45955 connecting to ZooKeeper ensemble=127.0.0.1:55878
2024-12-08T04:26:24,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459550x0, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T04:26:24,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459550x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T04:26:24,062 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45955-0x1006fe072e80001 connected
2024-12-08T04:26:24,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:26:24,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T04:26:24,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45955
2024-12-08T04:26:24,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45955
2024-12-08T04:26:24,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45955
2024-12-08T04:26:24,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45955
2024-12-08T04:26:24,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45955
2024-12-08T04:26:24,117 INFO  [Time-limited test {}] client.ConnectionUtils(129): regionserver/428ded7e54d6:0 server-side Connection retries=45
2024-12-08T04:26:24,117 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,118 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,118 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T04:26:24,118 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,119 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T04:26:24,119 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-08T04:26:24,120 INFO  [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T04:26:24,121 INFO  [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46421
2024-12-08T04:26:24,122 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-08T04:26:24,127 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-08T04:26:24,128 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,131 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,135 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46421 connecting to ZooKeeper ensemble=127.0.0.1:55878
2024-12-08T04:26:24,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464210x0, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T04:26:24,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:464210x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T04:26:24,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:464210x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:26:24,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:464210x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T04:26:24,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46421-0x1006fe072e80002 connected
2024-12-08T04:26:24,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46421
2024-12-08T04:26:24,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46421
2024-12-08T04:26:24,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46421
2024-12-08T04:26:24,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46421
2024-12-08T04:26:24,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46421
2024-12-08T04:26:24,190 INFO  [Time-limited test {}] client.ConnectionUtils(129): regionserver/428ded7e54d6:0 server-side Connection retries=45
2024-12-08T04:26:24,190 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,190 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,190 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-08T04:26:24,191 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-08T04:26:24,191 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-08T04:26:24,191 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-08T04:26:24,191 INFO  [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-08T04:26:24,192 INFO  [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41743
2024-12-08T04:26:24,193 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-08T04:26:24,198 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-08T04:26:24,200 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,206 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,210 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41743 connecting to ZooKeeper ensemble=127.0.0.1:55878
2024-12-08T04:26:24,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417430x0, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-08T04:26:24,219 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417430x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T04:26:24,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417430x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:26:24,223 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417430x0, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-08T04:26:24,224 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41743-0x1006fe072e80003 connected
2024-12-08T04:26:24,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41743
2024-12-08T04:26:24,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41743
2024-12-08T04:26:24,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41743
2024-12-08T04:26:24,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41743
2024-12-08T04:26:24,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41743
2024-12-08T04:26:24,242 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/428ded7e54d6,46337,1733631983069
2024-12-08T04:26:24,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,253 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/428ded7e54d6,46337,1733631983069
2024-12-08T04:26:24,264 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;428ded7e54d6:46337
2024-12-08T04:26:24,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-08T04:26:24,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-08T04:26:24,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-08T04:26:24,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-08T04:26:24,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-08T04:26:24,303 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-08T04:26:24,304 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/428ded7e54d6,46337,1733631983069 from backup master directory
2024-12-08T04:26:24,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/428ded7e54d6,46337,1733631983069
2024-12-08T04:26:24,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-08T04:26:24,309 WARN  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T04:26:24,309 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=428ded7e54d6,46337,1733631983069
2024-12-08T04:26:24,312 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-08T04:26:24,320 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-08T04:26:24,423 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741826_1002 (size=42)
2024-12-08T04:26:24,429 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741826_1002 (size=42)
2024-12-08T04:26:24,430 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741826_1002 (size=42)
2024-12-08T04:26:24,431 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/hbase.id with ID: e98204de-f98f-4bfd-8560-3d3e1c9ab0f1
2024-12-08T04:26:24,483 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T04:26:24,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:24,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741827_1003 (size=196)
2024-12-08T04:26:24,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741827_1003 (size=196)
2024-12-08T04:26:24,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741827_1003 (size=196)
2024-12-08T04:26:24,565 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:26:24,567 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-08T04:26:24,594 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:26:24,601 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T04:26:24,650 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741828_1004 (size=1189)
2024-12-08T04:26:24,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741828_1004 (size=1189)
2024-12-08T04:26:24,652 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741828_1004 (size=1189)
2024-12-08T04:26:24,678 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/data/master/store
2024-12-08T04:26:24,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741829_1005 (size=34)
2024-12-08T04:26:24,705 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741829_1005 (size=34)
2024-12-08T04:26:24,705 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741829_1005 (size=34)
2024-12-08T04:26:24,712 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-08T04:26:24,713 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:24,714 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-08T04:26:24,715 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:26:24,715 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:26:24,715 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-08T04:26:24,715 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:26:24,716 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:26:24,716 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-08T04:26:24,720 WARN  [master/428ded7e54d6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/data/master/store/.initializing
2024-12-08T04:26:24,720 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069
2024-12-08T04:26:24,729 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-08T04:26:24,742 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=428ded7e54d6%2C46337%2C1733631983069, suffix=, logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069, archiveDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/oldWALs, maxLogs=10
2024-12-08T04:26:24,766 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748, exclude list is [], retry=0
2024-12-08T04:26:24,792 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36937,DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d,DISK]
2024-12-08T04:26:24,792 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46763,DS-d544c124-2689-4f05-b21d-06431c2f1c66,DISK]
2024-12-08T04:26:24,793 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38289,DS-698c8590-153b-40b0-aced-31fb41c17a5e,DISK]
2024-12-08T04:26:24,796 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-08T04:26:24,848 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748
2024-12-08T04:26:24,849 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:45371:45371),(127.0.0.1/127.0.0.1:41083:41083)]
2024-12-08T04:26:24,850 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-08T04:26:24,850 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:24,854 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-08T04:26:24,856 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-08T04:26:24,903 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 
2024-12-08T04:26:24,932 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-08T04:26:24,936 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:24,940 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:24,941 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 
2024-12-08T04:26:24,944 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-08T04:26:24,945 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:24,946 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:24,946 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 
2024-12-08T04:26:24,949 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-08T04:26:24,949 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:24,951 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:24,951 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 
2024-12-08T04:26:24,955 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-08T04:26:24,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:24,956 INFO  [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:24,961 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-08T04:26:24,962 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-08T04:26:24,974 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-08T04:26:24,979 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-08T04:26:24,984 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:24,986 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64586986, jitterRate=-0.03757891058921814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-08T04:26:24,991 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-08T04:26:24,993 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-08T04:26:25,030 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7769161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:25,067 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-08T04:26:25,084 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-08T04:26:25,085 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-08T04:26:25,088 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-08T04:26:25,091 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 3 msec
2024-12-08T04:26:25,099 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec
2024-12-08T04:26:25,100 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-08T04:26:25,133 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-08T04:26:25,147 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-08T04:26:25,155 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-08T04:26:25,160 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-08T04:26:25,166 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-08T04:26:25,172 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-08T04:26:25,175 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-08T04:26:25,183 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-08T04:26:25,192 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-08T04:26:25,195 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-08T04:26:25,202 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-08T04:26:25,218 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-08T04:26:25,221 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-08T04:26:25,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,227 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=428ded7e54d6,46337,1733631983069, sessionid=0x1006fe072e80000, setting cluster-up flag (Was=false)
2024-12-08T04:26:25,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,255 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-08T04:26:25,264 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=428ded7e54d6,46337,1733631983069
2024-12-08T04:26:25,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:25,279 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-08T04:26:25,281 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=428ded7e54d6,46337,1733631983069
2024-12-08T04:26:25,389 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;428ded7e54d6:45955
2024-12-08T04:26:25,398 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;428ded7e54d6:41743
2024-12-08T04:26:25,398 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;428ded7e54d6:46421
2024-12-08T04:26:25,401 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1008): ClusterId : e98204de-f98f-4bfd-8560-3d3e1c9ab0f1
2024-12-08T04:26:25,405 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-08T04:26:25,405 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1008): ClusterId : e98204de-f98f-4bfd-8560-3d3e1c9ab0f1
2024-12-08T04:26:25,407 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-08T04:26:25,412 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1008): ClusterId : e98204de-f98f-4bfd-8560-3d3e1c9ab0f1
2024-12-08T04:26:25,412 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-08T04:26:25,414 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-08T04:26:25,415 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-08T04:26:25,416 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-08T04:26:25,416 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-08T04:26:25,417 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-08T04:26:25,417 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-08T04:26:25,420 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-08T04:26:25,420 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-08T04:26:25,421 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-08T04:26:25,421 DEBUG [RS:2;428ded7e54d6:41743 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bcffabc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:25,421 DEBUG [RS:0;428ded7e54d6:45955 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@728c4c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:25,421 DEBUG [RS:1;428ded7e54d6:46421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c7323d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:25,424 DEBUG [RS:1;428ded7e54d6:46421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@170d827b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=428ded7e54d6/172.17.0.2:0
2024-12-08T04:26:25,429 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-08T04:26:25,429 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-08T04:26:25,436 DEBUG [RS:2;428ded7e54d6:41743 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17660c54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=428ded7e54d6/172.17.0.2:0
2024-12-08T04:26:25,436 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-08T04:26:25,436 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-08T04:26:25,440 DEBUG [RS:0;428ded7e54d6:45955 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e60472b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=428ded7e54d6/172.17.0.2:0
2024-12-08T04:26:25,441 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-08T04:26:25,441 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-08T04:26:25,454 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService
2024-12-08T04:26:25,454 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService
2024-12-08T04:26:25,454 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService
2024-12-08T04:26:25,455 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService
2024-12-08T04:26:25,455 INFO  [RS:0;428ded7e54d6:45955 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:25,455 INFO  [RS:1;428ded7e54d6:46421 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:25,456 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-08T04:26:25,456 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-08T04:26:25,456 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:25,456 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912.
2024-12-08T04:26:25,456 INFO  [RS:2;428ded7e54d6:41743 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:25,456 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-08T04:26:25,459 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:41743, startcode=1733631984189
2024-12-08T04:26:25,459 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:46421, startcode=1733631984115
2024-12-08T04:26:25,459 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:45955, startcode=1733631983994
2024-12-08T04:26:25,476 DEBUG [RS:1;428ded7e54d6:46421 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:25,476 DEBUG [RS:2;428ded7e54d6:41743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:25,477 DEBUG [RS:0;428ded7e54d6:45955 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:25,537 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-08T04:26:25,540 INFO  [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44803, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:26:25,540 INFO  [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55843, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:26:25,540 INFO  [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36015, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:26:25,546 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-08T04:26:25,548 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
	at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T04:26:25,550 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-08T04:26:25,554 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
	at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T04:26:25,555 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
	at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T04:26:25,561 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 428ded7e54d6,46337,1733631983069
Number of backup masters: 0
Number of live region servers: 0
Number of dead region servers: 0
Number of unknown region servers: 0
Average load: 0.0
Number of requests: 0
Number of regions: 0
Number of regions in transition: 0
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/428ded7e54d6:0, corePoolSize=5, maxPoolSize=5
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/428ded7e54d6:0, corePoolSize=5, maxPoolSize=5
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/428ded7e54d6:0, corePoolSize=5, maxPoolSize=5
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/428ded7e54d6:0, corePoolSize=5, maxPoolSize=5
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/428ded7e54d6:0, corePoolSize=10, maxPoolSize=10
2024-12-08T04:26:25,567 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,568 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/428ded7e54d6:0, corePoolSize=2, maxPoolSize=2
2024-12-08T04:26:25,568 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,585 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(3097): Master is not running yet
2024-12-08T04:26:25,585 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3097): Master is not running yet
2024-12-08T04:26:25,585 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(3097): Master is not running yet
2024-12-08T04:26:25,585 WARN  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-08T04:26:25,585 WARN  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-08T04:26:25,585 WARN  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-08T04:26:25,586 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733632015586
2024-12-08T04:26:25,588 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-08T04:26:25,590 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-08T04:26:25,591 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-08T04:26:25,591 INFO  [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-08T04:26:25,595 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-08T04:26:25,596 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-08T04:26:25,596 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-08T04:26:25,596 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:25,596 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-08T04:26:25,597 INFO  [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-08T04:26:25,604 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,607 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-08T04:26:25,609 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-08T04:26:25,610 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-08T04:26:25,620 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-08T04:26:25,620 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-08T04:26:25,622 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.large.0-1733631985622,5,FailOnTimeoutGroup]
2024-12-08T04:26:25,623 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.small.0-1733631985622,5,FailOnTimeoutGroup]
2024-12-08T04:26:25,623 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,623 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-08T04:26:25,625 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,625 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741831_1007 (size=1039)
2024-12-08T04:26:25,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741831_1007 (size=1039)
2024-12-08T04:26:25,648 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741831_1007 (size=1039)
2024-12-08T04:26:25,650 INFO  [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-08T04:26:25,650 INFO  [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:25,672 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741832_1008 (size=32)
2024-12-08T04:26:25,673 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741832_1008 (size=32)
2024-12-08T04:26:25,673 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741832_1008 (size=32)
2024-12-08T04:26:25,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:25,682 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 
2024-12-08T04:26:25,687 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-08T04:26:25,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:25,687 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:45955, startcode=1733631983994
2024-12-08T04:26:25,687 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:41743, startcode=1733631984189
2024-12-08T04:26:25,687 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(3073): reportForDuty to master=428ded7e54d6,46337,1733631983069 with isa=428ded7e54d6/172.17.0.2:46421, startcode=1733631984115
2024-12-08T04:26:25,688 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:25,688 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 
2024-12-08T04:26:25,689 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,691 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-08T04:26:25,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:25,692 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(486): Registering regionserver=428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,696 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:25,696 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 
2024-12-08T04:26:25,700 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-08T04:26:25,700 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:25,701 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:25,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740
2024-12-08T04:26:25,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740
2024-12-08T04:26:25,710 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,710 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(486): Registering regionserver=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,710 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:25,710 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41407
2024-12-08T04:26:25,710 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-08T04:26:25,713 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-12-08T04:26:25,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T04:26:25,716 DEBUG [RS:0;428ded7e54d6:45955 {}] zookeeper.ZKUtil(111): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,716 WARN  [RS:0;428ded7e54d6:45955 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T04:26:25,716 INFO  [RS:0;428ded7e54d6:45955 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T04:26:25,716 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,716 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,716 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337 {}] master.ServerManager(486): Registering regionserver=428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,717 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:25,717 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41407
2024-12-08T04:26:25,717 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-08T04:26:25,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-08T04:26:25,719 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:25,720 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41407
2024-12-08T04:26:25,720 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-08T04:26:25,722 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [428ded7e54d6,45955,1733631983994]
2024-12-08T04:26:25,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T04:26:25,725 DEBUG [RS:2;428ded7e54d6:41743 {}] zookeeper.ZKUtil(111): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,725 WARN  [RS:2;428ded7e54d6:41743 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T04:26:25,725 INFO  [RS:2;428ded7e54d6:41743 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T04:26:25,725 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,725 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:25,727 DEBUG [RS:1;428ded7e54d6:46421 {}] zookeeper.ZKUtil(111): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,727 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [428ded7e54d6,41743,1733631984189]
2024-12-08T04:26:25,727 WARN  [RS:1;428ded7e54d6:46421 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-08T04:26:25,727 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [428ded7e54d6,46421,1733631984115]
2024-12-08T04:26:25,727 INFO  [RS:1;428ded7e54d6:46421 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T04:26:25,727 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,729 INFO  [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60215736, jitterRate=-0.10271561145782471}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-12-08T04:26:25,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:

2024-12-08T04:26:25,734 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-08T04:26:25,734 INFO  [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-08T04:26:25,734 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-08T04:26:25,734 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-08T04:26:25,734 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-08T04:26:25,738 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-08T04:26:25,738 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-08T04:26:25,740 INFO  [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-08T04:26:25,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:

2024-12-08T04:26:25,743 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-08T04:26:25,744 INFO  [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-08T04:26:25,751 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-08T04:26:25,752 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-08T04:26:25,753 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T04:26:25,755 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T04:26:25,762 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-08T04:26:25,788 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T04:26:25,789 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T04:26:25,791 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-08T04:26:25,794 INFO  [RS:2;428ded7e54d6:41743 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T04:26:25,794 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,796 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-08T04:26:25,800 INFO  [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-08T04:26:25,803 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-08T04:26:25,807 INFO  [RS:0;428ded7e54d6:45955 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T04:26:25,807 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,813 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-08T04:26:25,815 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,815 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,815 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,815 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/428ded7e54d6:0, corePoolSize=2, maxPoolSize=2
2024-12-08T04:26:25,816 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,817 DEBUG [RS:0;428ded7e54d6:45955 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/428ded7e54d6:0, corePoolSize=2, maxPoolSize=2
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,818 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,819 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,819 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,819 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,819 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,819 DEBUG [RS:2;428ded7e54d6:41743 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,820 INFO  [RS:1;428ded7e54d6:46421 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-08T04:26:25,820 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,825 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-08T04:26:25,827 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,827 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,827 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,827 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,827 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,827 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,828 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,41743,1733631984189-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/428ded7e54d6:0, corePoolSize=2, maxPoolSize=2
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,828 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,829 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,829 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,829 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/428ded7e54d6:0, corePoolSize=1, maxPoolSize=1
2024-12-08T04:26:25,829 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,829 DEBUG [RS:1;428ded7e54d6:46421 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/428ded7e54d6:0, corePoolSize=3, maxPoolSize=3
2024-12-08T04:26:25,832 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,832 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,832 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,832 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,832 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,45955,1733631983994-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T04:26:25,837 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,837 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,837 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,837 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,837 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46421,1733631984115-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T04:26:25,859 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T04:26:25,859 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T04:26:25,862 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,41743,1733631984189-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,862 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,45955,1733631983994-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,870 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-08T04:26:25,870 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46421,1733631984115-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:25,896 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.Replication(204): 428ded7e54d6,41743,1733631984189 started
2024-12-08T04:26:25,896 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.Replication(204): 428ded7e54d6,45955,1733631983994 started
2024-12-08T04:26:25,896 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1767): Serving as 428ded7e54d6,41743,1733631984189, RpcServer on 428ded7e54d6/172.17.0.2:41743, sessionid=0x1006fe072e80003
2024-12-08T04:26:25,896 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1767): Serving as 428ded7e54d6,45955,1733631983994, RpcServer on 428ded7e54d6/172.17.0.2:45955, sessionid=0x1006fe072e80001
2024-12-08T04:26:25,897 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T04:26:25,897 DEBUG [RS:2;428ded7e54d6:41743 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,897 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T04:26:25,897 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,41743,1733631984189'
2024-12-08T04:26:25,897 DEBUG [RS:0;428ded7e54d6:45955 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,897 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T04:26:25,897 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,45955,1733631983994'
2024-12-08T04:26:25,897 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T04:26:25,898 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.Replication(204): 428ded7e54d6,46421,1733631984115 started
2024-12-08T04:26:25,898 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1767): Serving as 428ded7e54d6,46421,1733631984115, RpcServer on 428ded7e54d6/172.17.0.2:46421, sessionid=0x1006fe072e80002
2024-12-08T04:26:25,898 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-08T04:26:25,899 DEBUG [RS:1;428ded7e54d6:46421 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,899 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,46421,1733631984115'
2024-12-08T04:26:25,899 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-08T04:26:25,899 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T04:26:25,899 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T04:26:25,900 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-08T04:26:25,900 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T04:26:25,900 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T04:26:25,901 DEBUG [RS:2;428ded7e54d6:41743 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:25,901 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,41743,1733631984189'
2024-12-08T04:26:25,901 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T04:26:25,901 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T04:26:25,902 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T04:26:25,902 DEBUG [RS:1;428ded7e54d6:46421 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:25,902 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T04:26:25,902 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,46421,1733631984115'
2024-12-08T04:26:25,902 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T04:26:25,902 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T04:26:25,902 DEBUG [RS:2;428ded7e54d6:41743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T04:26:25,903 INFO  [RS:2;428ded7e54d6:41743 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T04:26:25,903 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-08T04:26:25,903 INFO  [RS:2;428ded7e54d6:41743 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T04:26:25,903 DEBUG [RS:1;428ded7e54d6:46421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T04:26:25,903 INFO  [RS:1;428ded7e54d6:46421 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T04:26:25,903 INFO  [RS:1;428ded7e54d6:46421 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T04:26:25,903 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-08T04:26:25,903 DEBUG [RS:0;428ded7e54d6:45955 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 428ded7e54d6,45955,1733631983994
2024-12-08T04:26:25,905 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '428ded7e54d6,45955,1733631983994'
2024-12-08T04:26:25,905 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-08T04:26:25,906 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T04:26:25,907 DEBUG [RS:0;428ded7e54d6:45955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T04:26:25,907 INFO  [RS:0;428ded7e54d6:45955 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T04:26:25,907 INFO  [RS:0;428ded7e54d6:45955 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T04:26:25,951 WARN  [428ded7e54d6:46337 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-08T04:26:26,010 INFO  [RS:1;428ded7e54d6:46421 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-08T04:26:26,010 INFO  [RS:0;428ded7e54d6:45955 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-08T04:26:26,011 INFO  [RS:2;428ded7e54d6:41743 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-08T04:26:26,014 INFO  [RS:0;428ded7e54d6:45955 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=428ded7e54d6%2C45955%2C1733631983994, suffix=, logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,45955,1733631983994, archiveDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs, maxLogs=32
2024-12-08T04:26:26,014 INFO  [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=428ded7e54d6%2C41743%2C1733631984189, suffix=, logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189, archiveDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs, maxLogs=32
2024-12-08T04:26:26,014 INFO  [RS:1;428ded7e54d6:46421 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=428ded7e54d6%2C46421%2C1733631984115, suffix=, logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,46421,1733631984115, archiveDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs, maxLogs=32
2024-12-08T04:26:26,034 DEBUG [RS:0;428ded7e54d6:45955 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,45955,1733631983994/428ded7e54d6%2C45955%2C1733631983994.1733631986018, exclude list is [], retry=0
2024-12-08T04:26:26,040 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46763,DS-d544c124-2689-4f05-b21d-06431c2f1c66,DISK]
2024-12-08T04:26:26,040 DEBUG [RS:2;428ded7e54d6:41743 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189/428ded7e54d6%2C41743%2C1733631984189.1733631986018, exclude list is [], retry=0
2024-12-08T04:26:26,040 DEBUG [RS:1;428ded7e54d6:46421 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,46421,1733631984115/428ded7e54d6%2C46421%2C1733631984115.1733631986018, exclude list is [], retry=0
2024-12-08T04:26:26,040 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38289,DS-698c8590-153b-40b0-aced-31fb41c17a5e,DISK]
2024-12-08T04:26:26,041 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36937,DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d,DISK]
2024-12-08T04:26:26,046 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38289,DS-698c8590-153b-40b0-aced-31fb41c17a5e,DISK]
2024-12-08T04:26:26,046 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36937,DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d,DISK]
2024-12-08T04:26:26,046 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46763,DS-d544c124-2689-4f05-b21d-06431c2f1c66,DISK]
2024-12-08T04:26:26,074 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38289,DS-698c8590-153b-40b0-aced-31fb41c17a5e,DISK]
2024-12-08T04:26:26,074 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46763,DS-d544c124-2689-4f05-b21d-06431c2f1c66,DISK]
2024-12-08T04:26:26,074 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36937,DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d,DISK]
2024-12-08T04:26:26,079 INFO  [RS:0;428ded7e54d6:45955 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,45955,1733631983994/428ded7e54d6%2C45955%2C1733631983994.1733631986018
2024-12-08T04:26:26,080 INFO  [RS:1;428ded7e54d6:46421 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,46421,1733631984115/428ded7e54d6%2C46421%2C1733631984115.1733631986018
2024-12-08T04:26:26,082 DEBUG [RS:0;428ded7e54d6:45955 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:45371:45371),(127.0.0.1/127.0.0.1:41083:41083)]
2024-12-08T04:26:26,082 INFO  [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189/428ded7e54d6%2C41743%2C1733631984189.1733631986018
2024-12-08T04:26:26,083 DEBUG [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41083:41083),(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:45371:45371)]
2024-12-08T04:26:26,089 DEBUG [RS:1;428ded7e54d6:46421 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45371:45371),(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:41083:41083)]
2024-12-08T04:26:26,203 DEBUG [428ded7e54d6:46337 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-12-08T04:26:26,206 DEBUG [428ded7e54d6:46337 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:26:26,213 DEBUG [428ded7e54d6:46337 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:26:26,213 DEBUG [428ded7e54d6:46337 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:26:26,213 DEBUG [428ded7e54d6:46337 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:26:26,213 INFO  [428ded7e54d6:46337 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:26:26,213 INFO  [428ded7e54d6:46337 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:26:26,213 INFO  [428ded7e54d6:46337 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:26:26,213 DEBUG [428ded7e54d6:46337 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:26:26,219 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:26,224 INFO  [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 428ded7e54d6,41743,1733631984189, state=OPENING
2024-12-08T04:26:26,229 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-08T04:26:26,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:26,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:26,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:26,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:26,232 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,232 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,232 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,232 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,234 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:26:26,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:26,415 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:26:26,417 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:26:26,431 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-08T04:26:26,431 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-08T04:26:26,432 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta
2024-12-08T04:26:26,435 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=428ded7e54d6%2C41743%2C1733631984189.meta, suffix=.meta, logDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189, archiveDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs, maxLogs=32
2024-12-08T04:26:26,452 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189/428ded7e54d6%2C41743%2C1733631984189.meta.1733631986437.meta, exclude list is [], retry=0
2024-12-08T04:26:26,457 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46763,DS-d544c124-2689-4f05-b21d-06431c2f1c66,DISK]
2024-12-08T04:26:26,457 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36937,DS-4d9e1bf2-eda3-47a3-a586-e2356a55f31d,DISK]
2024-12-08T04:26:26,458 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38289,DS-698c8590-153b-40b0-aced-31fb41c17a5e,DISK]
2024-12-08T04:26:26,468 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/WALs/428ded7e54d6,41743,1733631984189/428ded7e54d6%2C41743%2C1733631984189.meta.1733631986437.meta
2024-12-08T04:26:26,469 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41887:41887),(127.0.0.1/127.0.0.1:45371:45371),(127.0.0.1/127.0.0.1:41083:41083)]
2024-12-08T04:26:26,469 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-08T04:26:26,471 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService
2024-12-08T04:26:26,472 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:26,473 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-08T04:26:26,475 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-08T04:26:26,476 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-08T04:26:26,488 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-08T04:26:26,488 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:26,489 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-08T04:26:26,489 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-08T04:26:26,495 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 
2024-12-08T04:26:26,498 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-08T04:26:26,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:26,500 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:26,500 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 
2024-12-08T04:26:26,502 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-08T04:26:26,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:26,503 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:26,504 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 
2024-12-08T04:26:26,506 INFO  [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-08T04:26:26,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:26,507 INFO  [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-08T04:26:26,509 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740
2024-12-08T04:26:26,512 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740
2024-12-08T04:26:26,516 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-12-08T04:26:26,519 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-08T04:26:26,521 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75305855, jitterRate=0.1221446841955185}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-12-08T04:26:26,525 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:

2024-12-08T04:26:26,534 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733631986407
2024-12-08T04:26:26,548 DEBUG [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-08T04:26:26,548 INFO  [RS_OPEN_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-08T04:26:26,550 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:26,553 INFO  [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 428ded7e54d6,41743,1733631984189, state=OPEN
2024-12-08T04:26:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T04:26:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T04:26:26,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T04:26:26,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-08T04:26:26,556 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-08T04:26:26,563 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-08T04:26:26,563 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=428ded7e54d6,41743,1733631984189 in 323 msec
2024-12-08T04:26:26,572 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-08T04:26:26,572 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 813 msec
2024-12-08T04:26:26,582 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1150 sec
2024-12-08T04:26:26,583 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733631986582, completionTime=-1
2024-12-08T04:26:26,583 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running
2024-12-08T04:26:26,583 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-08T04:26:26,630 DEBUG [hconnection-0x28111a62-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:26,633 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:26,650 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3
2024-12-08T04:26:26,650 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733632046650
2024-12-08T04:26:26,650 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733632106650
2024-12-08T04:26:26,650 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 67 msec
2024-12-08T04:26:26,674 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:26:26,684 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:26,685 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:26,685 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:26,687 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-428ded7e54d6:46337, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:26,687 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:26,694 DEBUG [master/428ded7e54d6:0.Chore.1 {}] janitor.CatalogJanitor(179): 
2024-12-08T04:26:26,697 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-08T04:26:26,698 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-08T04:26:26,706 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-08T04:26:26,712 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:26:26,714 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:26,717 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:26:26,776 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741837_1013 (size=358)
2024-12-08T04:26:26,777 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741837_1013 (size=358)
2024-12-08T04:26:26,777 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741837_1013 (size=358)
2024-12-08T04:26:27,181 INFO  [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 092cf4729ca6e7ca2b7aa78df922ed6c, NAME => 'hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:27,197 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741838_1014 (size=42)
2024-12-08T04:26:27,198 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741838_1014 (size=42)
2024-12-08T04:26:27,198 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741838_1014 (size=42)
2024-12-08T04:26:27,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:27,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 092cf4729ca6e7ca2b7aa78df922ed6c, disabling compactions & flushes
2024-12-08T04:26:27,200 INFO  [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c. after waiting 0 ms
2024-12-08T04:26:27,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,200 INFO  [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,201 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 092cf4729ca6e7ca2b7aa78df922ed6c:

2024-12-08T04:26:27,203 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:26:27,212 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733631987205"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733631987205"}]},"ts":"1733631987205"}
2024-12-08T04:26:27,246 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-08T04:26:27,249 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:26:27,252 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631987249"}]},"ts":"1733631987249"}
2024-12-08T04:26:27,259 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-08T04:26:27,265 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:26:27,267 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:26:27,267 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:26:27,267 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:26:27,267 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:26:27,267 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:26:27,267 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:26:27,267 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:26:27,269 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=092cf4729ca6e7ca2b7aa78df922ed6c, ASSIGN}]
2024-12-08T04:26:27,272 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=092cf4729ca6e7ca2b7aa78df922ed6c, ASSIGN
2024-12-08T04:26:27,275 INFO  [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=092cf4729ca6e7ca2b7aa78df922ed6c, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:26:27,426 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-08T04:26:27,427 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=092cf4729ca6e7ca2b7aa78df922ed6c, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:27,434 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 092cf4729ca6e7ca2b7aa78df922ed6c, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:26:27,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:27,601 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,602 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 092cf4729ca6e7ca2b7aa78df922ed6c, NAME => 'hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.', STARTKEY => '', ENDKEY => ''}
2024-12-08T04:26:27,602 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c. service=AccessControlService
2024-12-08T04:26:27,602 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:27,603 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,603 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:27,603 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,603 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,606 INFO  [StoreOpener-092cf4729ca6e7ca2b7aa78df922ed6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 092cf4729ca6e7ca2b7aa78df922ed6c 
2024-12-08T04:26:27,608 INFO  [StoreOpener-092cf4729ca6e7ca2b7aa78df922ed6c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 092cf4729ca6e7ca2b7aa78df922ed6c columnFamilyName info
2024-12-08T04:26:27,609 DEBUG [StoreOpener-092cf4729ca6e7ca2b7aa78df922ed6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:27,611 INFO  [StoreOpener-092cf4729ca6e7ca2b7aa78df922ed6c-1 {}] regionserver.HStore(327): Store=092cf4729ca6e7ca2b7aa78df922ed6c/info,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:27,614 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,615 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,619 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:26:27,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:27,627 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 092cf4729ca6e7ca2b7aa78df922ed6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63700609, jitterRate=-0.050786957144737244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:26:27,628 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 092cf4729ca6e7ca2b7aa78df922ed6c:

2024-12-08T04:26:27,631 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c., pid=6, masterSystemTime=1733631987591
2024-12-08T04:26:27,635 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,635 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:26:27,636 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=092cf4729ca6e7ca2b7aa78df922ed6c, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:27,650 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-08T04:26:27,652 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 092cf4729ca6e7ca2b7aa78df922ed6c, server=428ded7e54d6,41743,1733631984189 in 207 msec
2024-12-08T04:26:27,655 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-08T04:26:27,655 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=092cf4729ca6e7ca2b7aa78df922ed6c, ASSIGN in 381 msec
2024-12-08T04:26:27,660 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:26:27,660 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631987660"}]},"ts":"1733631987660"}
2024-12-08T04:26:27,664 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-08T04:26:27,670 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:26:27,673 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 971 msec
2024-12-08T04:26:27,711 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-08T04:26:27,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:27,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:27,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-08T04:26:27,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:27,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:27,749 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-08T04:26:27,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-08T04:26:27,771 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 25 msec
2024-12-08T04:26:27,783 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-08T04:26:27,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-08T04:26:27,803 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 18 msec
2024-12-08T04:26:27,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-08T04:26:27,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-08T04:26:27,815 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.506sec
2024-12-08T04:26:27,817 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-08T04:26:27,818 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-08T04:26:27,819 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-08T04:26:27,820 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-08T04:26:27,820 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-08T04:26:27,821 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-08T04:26:27,821 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-08T04:26:27,899 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d29c4c8 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e732ab7
2024-12-08T04:26:27,904 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-08T04:26:27,905 WARN  [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry
2024-12-08T04:26:27,918 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl
2024-12-08T04:26:27,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ecdd1c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:27,922 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:26:27,922 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:27,924 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase"
qualifier: "acl"
 procId is: 9
2024-12-08T04:26:27,924 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:26:27,927 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-08T04:26:27,927 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-08T04:26:27,934 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-08T04:26:27,951 DEBUG [hconnection-0x49ff6bd7-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:27,952 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741839_1015 (size=349)
2024-12-08T04:26:27,953 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741839_1015 (size=349)
2024-12-08T04:26:27,953 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741839_1015 (size=349)
2024-12-08T04:26:27,959 INFO  [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ab3154ca90ccc96a74d87ae33022559e, NAME => 'hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:27,966 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:27,969 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=428ded7e54d6,46337,1733631983069
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster...
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/test.cache.data in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.tmp.dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-08T04:26:27,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/nfs.dump.dir in system properties and HBase conf
2024-12-08T04:26:27,971 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir in system properties and HBase conf
2024-12-08T04:26:27,971 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-08T04:26:27,971 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-08T04:26:27,971 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-08T04:26:27,976 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741840_1016 (size=36)
2024-12-08T04:26:27,977 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741840_1016 (size=36)
2024-12-08T04:26:27,977 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741840_1016 (size=36)
2024-12-08T04:26:27,978 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:27,979 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing ab3154ca90ccc96a74d87ae33022559e, disabling compactions & flushes
2024-12-08T04:26:27,979 INFO  [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:27,979 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:27,979 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e. after waiting 0 ms
2024-12-08T04:26:27,979 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:27,979 INFO  [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:27,979 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for ab3154ca90ccc96a74d87ae33022559e:

2024-12-08T04:26:27,983 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:26:27,984 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733631987983"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733631987983"}]},"ts":"1733631987983"}
2024-12-08T04:26:27,989 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-08T04:26:27,992 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:26:27,993 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631987992"}]},"ts":"1733631987992"}
2024-12-08T04:26:27,996 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta
2024-12-08T04:26:28,001 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:26:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:26:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:26:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:26:28,003 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:26:28,003 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:26:28,003 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:26:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:26:28,003 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=ab3154ca90ccc96a74d87ae33022559e, ASSIGN}]
2024-12-08T04:26:28,009 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=ab3154ca90ccc96a74d87ae33022559e, ASSIGN
2024-12-08T04:26:28,011 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=ab3154ca90ccc96a74d87ae33022559e, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:26:28,036 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-08T04:26:28,128 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741841_1017 (size=592039)
2024-12-08T04:26:28,136 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741841_1017 (size=592039)
2024-12-08T04:26:28,136 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741841_1017 (size=592039)
2024-12-08T04:26:28,161 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 1 region. 1 retained the pre-restart assignment.
2024-12-08T04:26:28,162 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ab3154ca90ccc96a74d87ae33022559e, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:28,168 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure ab3154ca90ccc96a74d87ae33022559e, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:26:28,237 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-08T04:26:28,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:28,352 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:28,352 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => ab3154ca90ccc96a74d87ae33022559e, NAME => 'hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.', STARTKEY => '', ENDKEY => ''}
2024-12-08T04:26:28,353 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e. service=AccessControlService
2024-12-08T04:26:28,353 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:28,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:28,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,368 INFO  [StoreOpener-ab3154ca90ccc96a74d87ae33022559e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region ab3154ca90ccc96a74d87ae33022559e 
2024-12-08T04:26:28,376 INFO  [StoreOpener-ab3154ca90ccc96a74d87ae33022559e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ab3154ca90ccc96a74d87ae33022559e columnFamilyName l
2024-12-08T04:26:28,376 DEBUG [StoreOpener-ab3154ca90ccc96a74d87ae33022559e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:28,381 INFO  [StoreOpener-ab3154ca90ccc96a74d87ae33022559e-1 {}] regionserver.HStore(327): Store=ab3154ca90ccc96a74d87ae33022559e/l,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:28,384 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,385 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,389 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:26:28,404 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:28,408 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened ab3154ca90ccc96a74d87ae33022559e; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72268265, jitterRate=0.07688106596469879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:26:28,410 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for ab3154ca90ccc96a74d87ae33022559e:

2024-12-08T04:26:28,412 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e., pid=11, masterSystemTime=1733631988330
2024-12-08T04:26:28,416 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:28,416 INFO  [RS_OPEN_PRIORITY_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:26:28,417 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ab3154ca90ccc96a74d87ae33022559e, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:28,425 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-08T04:26:28,426 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure ab3154ca90ccc96a74d87ae33022559e, server=428ded7e54d6,41743,1733631984189 in 253 msec
2024-12-08T04:26:28,430 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-08T04:26:28,430 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=ab3154ca90ccc96a74d87ae33022559e, ASSIGN in 423 msec
2024-12-08T04:26:28,432 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:26:28,432 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631988432"}]},"ts":"1733631988432"}
2024-12-08T04:26:28,435 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta
2024-12-08T04:26:28,442 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:26:28,448 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 537 msec
2024-12-08T04:26:28,537 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-08T04:26:28,538 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed
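The repeated "Checking to see if procedure is done pid=9" polling, capped by the HBaseAdmin$TableFuture line above, is the caller waiting for CreateTableProcedure pid=9 to reach SUCCESS; a blocking Admin.createTable call hides exactly that loop. A rough client-side sketch with a hypothetical connection and table descriptor (standard HBase 2.x client API assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class CreateTableSketch {
  static void createAndWait(Configuration conf, TableDescriptor desc) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a CreateTableProcedure to the master, then polls
      // "is procedure done?" until it finishes, as pid=9 does above.
      admin.createTable(desc);
    }
  }
}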
2024-12-08T04:26:28,563 DEBUG [master/428ded7e54d6:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-08T04:26:28,564 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-08T04:26:28,565 INFO  [master/428ded7e54d6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=428ded7e54d6,46337,1733631983069-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-08T04:26:28,735 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741842_1018 (size=1663647)
2024-12-08T04:26:28,735 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741842_1018 (size=1663647)
2024-12-08T04:26:28,736 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741842_1018 (size=1663647)
2024-12-08T04:26:30,663 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:30,789 WARN  [Thread-399 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:31,149 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:31,153 WARN  [Thread-399 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
2024-12-08T04:26:31,155 INFO  [Thread-399 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:31,208 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:31,209 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:31,209 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-08T04:26:31,234 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:31,236 INFO  [Thread-399 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:31,236 INFO  [Thread-399 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:31,236 INFO  [Thread-399 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T04:26:31,245 INFO  [Thread-399 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d920d82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:31,246 INFO  [Thread-399 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79f1c1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:31,258 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37c1c2de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:31,258 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39328466{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:31,452 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class
2024-12-08T04:26:31,452 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class
2024-12-08T04:26:31,452 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
2024-12-08T04:26:31,455 INFO  [Thread-399 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM'
2024-12-08T04:26:31,526 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:31,745 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:32,009 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:26:32,201 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-08T04:26:32,203 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-08T04:26:32,205 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl'
2024-12-08T04:26:32,245 INFO  [Thread-399 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest"
2024-12-08T04:26:32,276 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a6658ca{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-40339-hadoop-yarn-common-3_4_1_jar-_-any-17500389030550171184/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster}
2024-12-08T04:26:32,277 INFO  [Thread-399 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8315674{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-33955-hadoop-yarn-common-3_4_1_jar-_-any-13531626198382047256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory}
2024-12-08T04:26:32,279 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f0174e7{HTTP/1.1, (http/1.1)}{localhost:40339}
2024-12-08T04:26:32,279 INFO  [Time-limited test {}] server.Server(415): Started @17598ms
2024-12-08T04:26:32,289 INFO  [Thread-399 {}] server.AbstractConnector(333): Started ServerConnector@1fac6a92{HTTP/1.1, (http/1.1)}{localhost:33955}
2024-12-08T04:26:32,289 INFO  [Thread-399 {}] server.Server(415): Started @17609ms
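The jetty-9.4.53 banner, ServerConnector and "Started @Nms" lines above come from the embedded Jetty servers hosting the mini MapReduce cluster's web UIs (the cluster and jobhistory contexts). Their startup follows the usual embedded-Jetty pattern; a hand-written sketch for comparison (host and port values are placeholders, not what the test bound):

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class EmbeddedJettySketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server();
    ServerConnector connector = new ServerConnector(server); // HTTP/1.1 connector, as in the log
    connector.setHost("localhost");
    connector.setPort(0);                                    // 0 = pick a free ephemeral port
    server.addConnector(connector);

    ServletContextHandler ctx = new ServletContextHandler(); // analogous to the /logs and /static handlers above
    ctx.setContextPath("/");
    server.setHandler(ctx);

    server.start();   // produces the "Started ServerConnector ..." / "Started @Nms" lines
    System.out.println("Listening on " + server.getURI());
    server.stop();
  }
}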
2024-12-08T04:26:32,607 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741843_1019 (size=5)
2024-12-08T04:26:32,607 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741843_1019 (size=5)
2024-12-08T04:26:32,609 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741843_1019 (size=5)
2024-12-08T04:26:33,596 WARN  [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled. So is the LogAggregationStatusTracker.
2024-12-08T04:26:33,602 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:33,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:26:33,633 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers
2024-12-08T04:26:33,634 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-08T04:26:33,634 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-08T04:26:33,635 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:26:33,636 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers
2024-12-08T04:26:33,636 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-08T04:26:33,636 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-08T04:26:33,637 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-08T04:26:33,638 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-08T04:26:33,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-12-08T04:26:33,639 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer
2024-12-08T04:26:33,641 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:26:33,641 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers
2024-12-08T04:26:33,641 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-12-08T04:26:33,641 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers
2024-12-08T04:26:33,641 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-08T04:26:33,642 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
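The Coprocessor.*.CP_...AccessController metric registries above exist because the cluster was brought up with the AccessController loaded as a system coprocessor; the test itself appears to go through SecureTestUtil (its MasterSyncObserver is registered above). A simplified configuration sketch using the standard coprocessor property names (Kerberos setup omitted, so this is equivalent in spirit only, not the test's actual wiring):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.security.access.AccessController;

public final class AccessControlConfSketch {
  static void enableAccessControl(Configuration conf) {
    conf.setBoolean("hbase.security.authorization", true);
    // Load the AccessController on master, region and region server hosts;
    // its per-coprocessor metrics then appear as the
    // "Coprocessor.*.CP_...AccessController" registries seen above.
    conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
  }
}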
2024-12-08T04:26:33,651 WARN  [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
2024-12-08T04:26:33,652 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:33,672 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:33,672 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:33,672 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T04:26:33,674 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:33,675 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c5b84e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:33,675 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57cb4dc1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:33,782 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class
2024-12-08T04:26:33,782 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
2024-12-08T04:26:33,782 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class
2024-12-08T04:26:33,782 INFO  [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM'
2024-12-08T04:26:33,801 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:33,831 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:33,968 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:33,982 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@594b2648{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-41211-hadoop-yarn-common-3_4_1_jar-_-any-15356060587725716830/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node}
2024-12-08T04:26:33,983 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64af6801{HTTP/1.1, (http/1.1)}{localhost:41211}
2024-12-08T04:26:33,983 INFO  [Time-limited test {}] server.Server(415): Started @19302ms
2024-12-08T04:26:34,208 WARN  [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled. So is the LogAggregationStatusTracker.
2024-12-08T04:26:34,211 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:34,225 WARN  [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected.
2024-12-08T04:26:34,226 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-08T04:26:34,228 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-08T04:26:34,228 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-08T04:26:34,228 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-08T04:26:34,229 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-08T04:26:34,231 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5699d755{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,AVAILABLE}
2024-12-08T04:26:34,231 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30336541{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE}
2024-12-08T04:26:34,299 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class
2024-12-08T04:26:34,300 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class
2024-12-08T04:26:34,300 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class
2024-12-08T04:26:34,300 INFO  [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM'
2024-12-08T04:26:34,308 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:34,324 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:34,499 INFO  [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton"
2024-12-08T04:26:34,506 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@584a4c3{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/java.io.tmpdir/jetty-localhost-41361-hadoop-yarn-common-3_4_1_jar-_-any-9004303929756702298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node}
2024-12-08T04:26:34,511 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f58eb73{HTTP/1.1, (http/1.1)}{localhost:41361}
2024-12-08T04:26:34,511 INFO  [Time-limited test {}] server.Server(415): Started @19830ms
2024-12-08T04:26:34,576 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started
2024-12-08T04:26:34,578 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1])
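The hbase.Waiter line above is the test polling for a condition with a 30 s budget scaled by wait.for.ratio. Its usual calling pattern looks roughly like the following; the predicate body is invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Waiter;

public final class WaiterSketch {
  static void waitForCondition(Configuration conf) throws Exception {
    // Re-evaluates the predicate until it holds or the (scaled) 30 000 ms budget runs out.
    Waiter.waitFor(conf, 30000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return true; // a real test would check cluster state here (hypothetical condition)
      }
    });
  }
}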
2024-12-08T04:26:34,624 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=718, OpenFileDescriptor=779, MaxFileDescriptor=1048576, SystemLoadAverage=383, ProcessCount=11, AvailableMemoryMB=6148
2024-12-08T04:26:34,625 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=718 is superior to 500
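The ResourceChecker "before:" line marks the start of TestSecureExportSnapshot#testExportWithTargetName, which snapshots a table and exports the snapshot under a different name. A sketch of that flow with placeholder names and paths; treating -target as the option that renames the snapshot at the destination is an assumption here, not something this log states:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class ExportSnapshotSketch {
  static void snapshotAndExport(Configuration conf, Admin admin) throws Exception {
    // Take a snapshot of the table this test is about to create (names are placeholders).
    admin.snapshot("snap-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"));
    // Copy it to another filesystem under a different snapshot name.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snap-testExportWithTargetName",
        "-copy-to", "hdfs://localhost:41407/export",   // placeholder destination
        "-target", "renamed-snapshot"                  // assumed renaming option
    });
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot exited with " + rc);
    }
  }
}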
2024-12-08T04:26:34,636 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-08T04:26:34,639 INFO  [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-08T04:26:34,645 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:26:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:26:34,649 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:26:34,649 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12
2024-12-08T04:26:34,650 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:34,652 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:26:34,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-08T04:26:34,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741844_1020 (size=406)
2024-12-08T04:26:34,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741844_1020 (size=406)
2024-12-08T04:26:34,680 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2be016cf061cf04bc5ed902016276d60, NAME => 'testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:34,680 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741844_1020 (size=406)
2024-12-08T04:26:34,680 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => de88e9e2ac98bd5abe6e5139a280170f, NAME => 'testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
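The two "creating {ENCODED => ...}" lines above lay the table out as two regions split at row key '1', with the single 'cf' family echoing the attributes from the create request at 04:26:34,645. Building an equivalent descriptor programmatically would look roughly like this (HBase 2.x builder API assumed; the Admin handle is hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class ExportTableDescriptorSketch {
  static void createExportTable(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
        .setRegionReplication(1)                         // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                           // VERSIONS => '1'
            .setBlocksize(64 * 1024)                     // BLOCKSIZE => '65536'
            .build())
        .build();
    // One split key gives two regions, ['', '1') and ['1', ''), matching the log.
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}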
2024-12-08T04:26:34,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741845_1021 (size=67)
2024-12-08T04:26:34,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741845_1021 (size=67)
2024-12-08T04:26:34,700 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741845_1021 (size=67)
2024-12-08T04:26:34,701 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:34,702 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 2be016cf061cf04bc5ed902016276d60, disabling compactions & flushes
2024-12-08T04:26:34,702 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:34,702 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:34,702 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. after waiting 0 ms
2024-12-08T04:26:34,702 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:34,702 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:34,702 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2be016cf061cf04bc5ed902016276d60:

2024-12-08T04:26:34,715 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741846_1022 (size=67)
2024-12-08T04:26:34,715 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741846_1022 (size=67)
2024-12-08T04:26:34,716 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741846_1022 (size=67)
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing de88e9e2ac98bd5abe6e5139a280170f, disabling compactions & flushes
2024-12-08T04:26:34,718 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. after waiting 0 ms
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:34,718 INFO  [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:34,718 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for de88e9e2ac98bd5abe6e5139a280170f:

2024-12-08T04:26:34,720 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:26:34,720 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733631994720"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733631994720"}]},"ts":"1733631994720"}
2024-12-08T04:26:34,721 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733631994720"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733631994720"}]},"ts":"1733631994720"}
2024-12-08T04:26:34,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-08T04:26:34,760 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:26:34,762 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:26:34,763 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631994762"}]},"ts":"1733631994762"}
2024-12-08T04:26:34,765 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta
2024-12-08T04:26:34,771 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:26:34,773 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:26:34,773 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:26:34,773 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:26:34,773 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:26:34,773 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:26:34,773 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:26:34,773 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:26:34,774 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, ASSIGN}]
2024-12-08T04:26:34,776 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, ASSIGN
2024-12-08T04:26:34,776 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, ASSIGN
2024-12-08T04:26:34,778 INFO  [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:26:34,778 INFO  [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:26:34,928 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:26:34,929 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=de88e9e2ac98bd5abe6e5139a280170f, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:26:34,929 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=2be016cf061cf04bc5ed902016276d60, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:34,939 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure de88e9e2ac98bd5abe6e5139a280170f, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:26:34,942 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 2be016cf061cf04bc5ed902016276d60, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:26:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-08T04:26:35,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:35,096 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:26:35,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:35,114 INFO  [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:26:35,120 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:35,120 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:35,121 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 2be016cf061cf04bc5ed902016276d60, NAME => 'testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:26:35,121 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => de88e9e2ac98bd5abe6e5139a280170f, NAME => 'testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:26:35,121 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. service=AccessControlService
2024-12-08T04:26:35,121 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. service=AccessControlService
2024-12-08T04:26:35,121 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:35,121 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,122 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,122 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,124 INFO  [StoreOpener-2be016cf061cf04bc5ed902016276d60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2be016cf061cf04bc5ed902016276d60 
2024-12-08T04:26:35,126 INFO  [StoreOpener-2be016cf061cf04bc5ed902016276d60-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2be016cf061cf04bc5ed902016276d60 columnFamilyName cf
2024-12-08T04:26:35,126 DEBUG [StoreOpener-2be016cf061cf04bc5ed902016276d60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:35,127 INFO  [StoreOpener-2be016cf061cf04bc5ed902016276d60-1 {}] regionserver.HStore(327): Store=2be016cf061cf04bc5ed902016276d60/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:35,128 INFO  [StoreOpener-de88e9e2ac98bd5abe6e5139a280170f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region de88e9e2ac98bd5abe6e5139a280170f 
2024-12-08T04:26:35,128 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,129 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,130 INFO  [StoreOpener-de88e9e2ac98bd5abe6e5139a280170f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region de88e9e2ac98bd5abe6e5139a280170f columnFamilyName cf
2024-12-08T04:26:35,130 DEBUG [StoreOpener-de88e9e2ac98bd5abe6e5139a280170f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:26:35,131 INFO  [StoreOpener-de88e9e2ac98bd5abe6e5139a280170f-1 {}] regionserver.HStore(327): Store=de88e9e2ac98bd5abe6e5139a280170f/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:26:35,132 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,133 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,133 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,137 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,137 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:35,139 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 2be016cf061cf04bc5ed902016276d60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63720809, jitterRate=-0.05048595368862152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:26:35,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 2be016cf061cf04bc5ed902016276d60:

2024-12-08T04:26:35,144 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60., pid=16, masterSystemTime=1733631995098
2024-12-08T04:26:35,148 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:35,148 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:35,149 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=2be016cf061cf04bc5ed902016276d60, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:26:35,150 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:26:35,151 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened de88e9e2ac98bd5abe6e5139a280170f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62320772, jitterRate=-0.07134813070297241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:26:35,151 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for de88e9e2ac98bd5abe6e5139a280170f:

2024-12-08T04:26:35,154 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f., pid=15, masterSystemTime=1733631995095
2024-12-08T04:26:35,157 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:35,157 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13
2024-12-08T04:26:35,157 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:35,157 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 2be016cf061cf04bc5ed902016276d60, server=428ded7e54d6,41743,1733631984189 in 210 msec
2024-12-08T04:26:35,159 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=de88e9e2ac98bd5abe6e5139a280170f, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:26:35,160 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, ASSIGN in 383 msec
2024-12-08T04:26:35,166 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-12-08T04:26:35,166 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure de88e9e2ac98bd5abe6e5139a280170f, server=428ded7e54d6,46421,1733631984115 in 223 msec
2024-12-08T04:26:35,172 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=12
2024-12-08T04:26:35,172 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, ASSIGN in 392 msec
2024-12-08T04:26:35,173 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:26:35,173 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733631995173"}]},"ts":"1733631995173"}
2024-12-08T04:26:35,176 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta
2024-12-08T04:26:35,180 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:26:35,186 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA
2024-12-08T04:26:35,201 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl
2024-12-08T04:26:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:26:35,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:26:35,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-08T04:26:35,261 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04
2024-12-08T04:26:35,261 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04
2024-12-08T04:26:35,261 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04
2024-12-08T04:26:35,261 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04
2024-12-08T04:26:35,263 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 615 msec
2024-12-08T04:26:35,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-08T04:26:35,762 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed
2024-12-08T04:26:35,762 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms
2024-12-08T04:26:35,763 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:26:35,770 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states.
2024-12-08T04:26:35,770 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:26:35,771 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned.
2024-12-08T04:26:35,783 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
2024-12-08T04:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733631995783 (current time:1733631995783).
2024-12-08T04:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2
2024-12-08T04:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:26:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ca3a64b to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1562076b
2024-12-08T04:26:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e4402d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:35,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:35,795 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57134, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:35,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ca3a64b to 127.0.0.1:55878
2024-12-08T04:26:35,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:26:35,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x391d0db6 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@700d92e9
2024-12-08T04:26:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@286a0a6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:35,824 DEBUG [hconnection-0x6c0bd196-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:35,826 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x391d0db6 to 127.0.0.1:55878
2024-12-08T04:26:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:26:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-08T04:26:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:26:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
2024-12-08T04:26:35,874 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:26:35,880 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:26:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17
2024-12-08T04:26:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17
2024-12-08T04:26:35,902 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:26:35,946 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741847_1023 (size=167)
2024-12-08T04:26:35,946 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741847_1023 (size=167)
2024-12-08T04:26:35,947 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741847_1023 (size=167)
2024-12-08T04:26:35,950 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:26:35,954 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f}]
2024-12-08T04:26:35,959 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:35,963 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:35,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17
2024-12-08T04:26:36,118 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:36,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:36,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19
2024-12-08T04:26:36,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18
2024-12-08T04:26:36,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:36,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for de88e9e2ac98bd5abe6e5139a280170f:

2024-12-08T04:26:36,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. for emptySnaptb0-testExportWithTargetName completed.
2024-12-08T04:26:36,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.' region-info for snapshot=emptySnaptb0-testExportWithTargetName
2024-12-08T04:26:36,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:26:36,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:36,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:26:36,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 2be016cf061cf04bc5ed902016276d60:

2024-12-08T04:26:36,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. for emptySnaptb0-testExportWithTargetName completed.
2024-12-08T04:26:36,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.' region-info for snapshot=emptySnaptb0-testExportWithTargetName
2024-12-08T04:26:36,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:26:36,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:26:36,158 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741848_1024 (size=70)
2024-12-08T04:26:36,161 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741848_1024 (size=70)
2024-12-08T04:26:36,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:36,163 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741848_1024 (size=70)
2024-12-08T04:26:36,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18
2024-12-08T04:26:36,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=18
2024-12-08T04:26:36,169 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:36,169 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:36,169 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741849_1025 (size=70)
2024-12-08T04:26:36,170 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741849_1025 (size=70)
2024-12-08T04:26:36,172 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741849_1025 (size=70)
2024-12-08T04:26:36,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:36,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-08T04:26:36,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-08T04:26:36,175 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60 in 218 msec
2024-12-08T04:26:36,175 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:36,176 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:36,181 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=17
2024-12-08T04:26:36,181 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f in 224 msec
2024-12-08T04:26:36,181 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:26:36,184 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:26:36,189 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:26:36,189 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName
2024-12-08T04:26:36,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17
2024-12-08T04:26:36,192 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName
2024-12-08T04:26:36,220 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741850_1026 (size=549)
2024-12-08T04:26:36,221 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741850_1026 (size=549)
2024-12-08T04:26:36,221 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741850_1026 (size=549)
2024-12-08T04:26:36,225 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:26:36,237 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:26:36,238 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportWithTargetName
2024-12-08T04:26:36,242 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:26:36,242 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17
2024-12-08T04:26:36,244 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 379 msec
2024-12-08T04:26:36,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17
2024-12-08T04:26:36,494 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed
2024-12-08T04:26:36,521 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:36,522 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:26:36,524 INFO  [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:36,527 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:26:36,535 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName
2024-12-08T04:26:36,536 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:36,537 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:26:36,585 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
2024-12-08T04:26:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733631996585 (current time:1733631996585).
2024-12-08T04:26:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:26:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2
2024-12-08T04:26:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:26:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1178b73d to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@512d45ce
2024-12-08T04:26:36,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b644d90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:36,609 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1178b73d to 127.0.0.1:55878
2024-12-08T04:26:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:26:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19601e99 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22d82b4e
2024-12-08T04:26:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b29fa48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:26:36,628 DEBUG [hconnection-0x3a77efc9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:26:36,630 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57162, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:26:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19601e99 to 127.0.0.1:55878
2024-12-08T04:26:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:26:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-08T04:26:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:26:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
2024-12-08T04:26:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20
2024-12-08T04:26:36,641 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:26:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-08T04:26:36,644 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:26:36,654 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:26:36,673 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741851_1027 (size=162)
2024-12-08T04:26:36,674 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741851_1027 (size=162)
2024-12-08T04:26:36,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741851_1027 (size=162)
2024-12-08T04:26:36,677 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:26:36,678 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f}]
2024-12-08T04:26:36,680 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:36,680 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:36,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-08T04:26:36,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:26:36,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:26:36,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22
2024-12-08T04:26:36,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21
2024-12-08T04:26:36,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:36,841 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing de88e9e2ac98bd5abe6e5139a280170f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB
2024-12-08T04:26:36,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:36,848 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 2be016cf061cf04bc5ed902016276d60 1/1 column families, dataSize=266 B heapSize=832 B
2024-12-08T04:26:36,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/.tmp/cf/e66bcfe7d1b6425c8cbbaa4be086b757 is 71, key is 17e3cec2ff55590ac83428a6dd411597/cf:q/1733631996526/Put/seqid=0
2024-12-08T04:26:36,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/.tmp/cf/10f386223b5e4bd49dc659e4bde85c61 is 71, key is 079d062c077077982efba07ee63b73a0/cf:q/1733631996521/Put/seqid=0
2024-12-08T04:26:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-08T04:26:36,969 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741853_1029 (size=8258)
2024-12-08T04:26:36,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741853_1029 (size=8258)
2024-12-08T04:26:36,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741853_1029 (size=8258)
2024-12-08T04:26:36,971 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/.tmp/cf/e66bcfe7d1b6425c8cbbaa4be086b757
2024-12-08T04:26:37,010 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741852_1028 (size=5356)
2024-12-08T04:26:37,011 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741852_1028 (size=5356)
2024-12-08T04:26:37,012 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741852_1028 (size=5356)
2024-12-08T04:26:37,013 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/.tmp/cf/10f386223b5e4bd49dc659e4bde85c61
2024-12-08T04:26:37,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/.tmp/cf/e66bcfe7d1b6425c8cbbaa4be086b757 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757
2024-12-08T04:26:37,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/.tmp/cf/10f386223b5e4bd49dc659e4bde85c61 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61
2024-12-08T04:26:37,094 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757, entries=46, sequenceid=6, filesize=8.1 K
2024-12-08T04:26:37,097 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for de88e9e2ac98bd5abe6e5139a280170f in 257ms, sequenceid=6, compaction requested=false
2024-12-08T04:26:37,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName'
2024-12-08T04:26:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for de88e9e2ac98bd5abe6e5139a280170f:

2024-12-08T04:26:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. for snaptb0-testExportWithTargetName completed.
2024-12-08T04:26:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.' region-info for snapshot=snaptb0-testExportWithTargetName
2024-12-08T04:26:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:26:37,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757] hfiles
2024-12-08T04:26:37,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757 for snapshot=snaptb0-testExportWithTargetName
2024-12-08T04:26:37,106 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61, entries=4, sequenceid=6, filesize=5.2 K
2024-12-08T04:26:37,107 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2be016cf061cf04bc5ed902016276d60 in 259ms, sequenceid=6, compaction requested=false
2024-12-08T04:26:37,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 2be016cf061cf04bc5ed902016276d60:

2024-12-08T04:26:37,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. for snaptb0-testExportWithTargetName completed.
2024-12-08T04:26:37,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.' region-info for snapshot=snaptb0-testExportWithTargetName
2024-12-08T04:26:37,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:26:37,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61] hfiles
2024-12-08T04:26:37,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61 for snapshot=snaptb0-testExportWithTargetName
2024-12-08T04:26:37,141 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741854_1030 (size=109)
2024-12-08T04:26:37,142 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741854_1030 (size=109)
2024-12-08T04:26:37,143 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741854_1030 (size=109)
2024-12-08T04:26:37,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:26:37,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22
2024-12-08T04:26:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=22
2024-12-08T04:26:37,146 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:37,146 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:26:37,152 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure de88e9e2ac98bd5abe6e5139a280170f in 470 msec
2024-12-08T04:26:37,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741855_1031 (size=109)
2024-12-08T04:26:37,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741855_1031 (size=109)
2024-12-08T04:26:37,162 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741855_1031 (size=109)
2024-12-08T04:26:37,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:26:37,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21
2024-12-08T04:26:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=21
2024-12-08T04:26:37,165 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:37,165 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:26:37,173 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20
2024-12-08T04:26:37,173 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 2be016cf061cf04bc5ed902016276d60 in 489 msec
2024-12-08T04:26:37,173 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:26:37,175 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:26:37,177 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:26:37,177 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName
2024-12-08T04:26:37,179 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName
2024-12-08T04:26:37,214 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741856_1032 (size=627)
2024-12-08T04:26:37,215 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741856_1032 (size=627)
2024-12-08T04:26:37,215 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741856_1032 (size=627)
2024-12-08T04:26:37,223 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:26:37,235 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:26:37,238 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName
2024-12-08T04:26:37,249 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:26:37,249 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20
2024-12-08T04:26:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-08T04:26:37,253 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 613 msec
2024-12-08T04:26:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-08T04:26:37,755 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed
2024-12-08T04:26:37,755 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755
2024-12-08T04:26:37,756 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:37,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:26:37,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/.tmp/testExportWithTargetName
2024-12-08T04:26:37,835 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:26:37,848 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/.tmp/testExportWithTargetName
2024-12-08T04:26:37,879 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741857_1033 (size=162)
2024-12-08T04:26:37,879 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741857_1033 (size=162)
2024-12-08T04:26:37,880 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741857_1033 (size=162)
2024-12-08T04:26:37,901 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741858_1034 (size=627)
2024-12-08T04:26:37,901 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741858_1034 (size=627)
2024-12-08T04:26:37,902 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741858_1034 (size=627)
2024-12-08T04:26:37,932 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741859_1035 (size=154)
2024-12-08T04:26:37,932 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741859_1035 (size=154)
2024-12-08T04:26:37,933 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741859_1035 (size=154)
2024-12-08T04:26:37,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:37,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:37,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:37,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-6137839868749806721.jar
2024-12-08T04:26:39,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,481 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-2183354958793207134.jar
2024-12-08T04:26:39,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,483 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,483 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:26:39,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:26:39,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:26:39,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:26:39,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:26:39,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:26:39,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:26:39,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:26:39,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:26:39,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:26:39,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:26:39,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:26:39,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:26:39,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:26:39,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:26:39,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:26:39,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:26:39,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:26:39,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:26:39,496 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:26:39,778 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741860_1036 (size=127628)
2024-12-08T04:26:39,779 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741860_1036 (size=127628)
2024-12-08T04:26:39,780 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741860_1036 (size=127628)
2024-12-08T04:26:40,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741861_1037 (size=2172101)
2024-12-08T04:26:40,005 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741861_1037 (size=2172101)
2024-12-08T04:26:40,005 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741861_1037 (size=2172101)
2024-12-08T04:26:40,094 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741862_1038 (size=213228)
2024-12-08T04:26:40,094 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741862_1038 (size=213228)
2024-12-08T04:26:40,105 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741862_1038 (size=213228)
2024-12-08T04:26:40,207 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741863_1039 (size=1877034)
2024-12-08T04:26:40,208 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741863_1039 (size=1877034)
2024-12-08T04:26:40,208 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741863_1039 (size=1877034)
2024-12-08T04:26:40,265 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741864_1040 (size=533455)
2024-12-08T04:26:40,265 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741864_1040 (size=533455)
2024-12-08T04:26:40,266 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741864_1040 (size=533455)
2024-12-08T04:26:40,369 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741865_1041 (size=7280644)
2024-12-08T04:26:40,369 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741865_1041 (size=7280644)
2024-12-08T04:26:40,370 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741865_1041 (size=7280644)
2024-12-08T04:26:40,493 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741866_1042 (size=4188619)
2024-12-08T04:26:40,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741866_1042 (size=4188619)
2024-12-08T04:26:40,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741866_1042 (size=4188619)
2024-12-08T04:26:40,579 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741867_1043 (size=20406)
2024-12-08T04:26:40,579 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741867_1043 (size=20406)
2024-12-08T04:26:40,581 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741867_1043 (size=20406)
2024-12-08T04:26:40,626 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:26:40,714 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741868_1044 (size=75495)
2024-12-08T04:26:40,715 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741868_1044 (size=75495)
2024-12-08T04:26:40,715 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741868_1044 (size=75495)
2024-12-08T04:26:40,799 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741869_1045 (size=45609)
2024-12-08T04:26:40,800 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741869_1045 (size=45609)
2024-12-08T04:26:40,801 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741869_1045 (size=45609)
2024-12-08T04:26:40,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741870_1046 (size=110084)
2024-12-08T04:26:40,831 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741870_1046 (size=110084)
2024-12-08T04:26:40,832 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741870_1046 (size=110084)
2024-12-08T04:26:40,860 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741871_1047 (size=451756)
2024-12-08T04:26:40,862 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741871_1047 (size=451756)
2024-12-08T04:26:40,862 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741871_1047 (size=451756)
2024-12-08T04:26:40,898 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741872_1048 (size=1323991)
2024-12-08T04:26:40,900 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741872_1048 (size=1323991)
2024-12-08T04:26:40,909 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741872_1048 (size=1323991)
2024-12-08T04:26:40,929 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741873_1049 (size=23076)
2024-12-08T04:26:40,932 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741873_1049 (size=23076)
2024-12-08T04:26:40,932 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741873_1049 (size=23076)
2024-12-08T04:26:40,979 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741874_1050 (size=126803)
2024-12-08T04:26:40,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741874_1050 (size=126803)
2024-12-08T04:26:40,981 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741874_1050 (size=126803)
2024-12-08T04:26:41,012 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741875_1051 (size=322274)
2024-12-08T04:26:41,013 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741875_1051 (size=322274)
2024-12-08T04:26:41,018 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741875_1051 (size=322274)
2024-12-08T04:26:41,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741876_1052 (size=6350155)
2024-12-08T04:26:41,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741876_1052 (size=6350155)
2024-12-08T04:26:41,072 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741876_1052 (size=6350155)
2024-12-08T04:26:41,099 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741877_1053 (size=1832290)
2024-12-08T04:26:41,099 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741877_1053 (size=1832290)
2024-12-08T04:26:41,100 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741877_1053 (size=1832290)
2024-12-08T04:26:41,129 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741878_1054 (size=30081)
2024-12-08T04:26:41,129 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741878_1054 (size=30081)
2024-12-08T04:26:41,131 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741878_1054 (size=30081)
2024-12-08T04:26:41,164 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741879_1055 (size=53616)
2024-12-08T04:26:41,165 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741879_1055 (size=53616)
2024-12-08T04:26:41,165 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741879_1055 (size=53616)
2024-12-08T04:26:41,195 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741880_1056 (size=29229)
2024-12-08T04:26:41,196 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741880_1056 (size=29229)
2024-12-08T04:26:41,196 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741880_1056 (size=29229)
2024-12-08T04:26:41,218 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741881_1057 (size=169089)
2024-12-08T04:26:41,219 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741881_1057 (size=169089)
2024-12-08T04:26:41,219 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741881_1057 (size=169089)
2024-12-08T04:26:41,263 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741882_1058 (size=5175431)
2024-12-08T04:26:41,264 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741882_1058 (size=5175431)
2024-12-08T04:26:41,265 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741882_1058 (size=5175431)
2024-12-08T04:26:41,330 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741883_1059 (size=136454)
2024-12-08T04:26:41,333 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741883_1059 (size=136454)
2024-12-08T04:26:41,333 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741883_1059 (size=136454)
2024-12-08T04:26:41,372 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741884_1060 (size=907852)
2024-12-08T04:26:41,374 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741884_1060 (size=907852)
2024-12-08T04:26:41,374 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741884_1060 (size=907852)
2024-12-08T04:26:41,455 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741885_1061 (size=3317408)
2024-12-08T04:26:41,455 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741885_1061 (size=3317408)
2024-12-08T04:26:41,456 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741885_1061 (size=3317408)
2024-12-08T04:26:41,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741886_1062 (size=503880)
2024-12-08T04:26:41,495 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741886_1062 (size=503880)
2024-12-08T04:26:41,496 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741886_1062 (size=503880)
2024-12-08T04:26:41,554 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741887_1063 (size=4695811)
2024-12-08T04:26:41,554 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741887_1063 (size=4695811)
2024-12-08T04:26:41,555 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741887_1063 (size=4695811)
2024-12-08T04:26:41,558 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:26:41,564 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list
2024-12-08T04:26:41,572 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:26:41,625 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741888_1064 (size=342)
2024-12-08T04:26:41,628 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741888_1064 (size=342)
2024-12-08T04:26:41,631 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741888_1064 (size=342)
2024-12-08T04:26:41,670 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741889_1065 (size=15)
2024-12-08T04:26:41,673 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741889_1065 (size=15)
2024-12-08T04:26:41,674 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741889_1065 (size=15)
2024-12-08T04:26:41,847 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741890_1066 (size=304886)
2024-12-08T04:26:41,848 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741890_1066 (size=304886)
2024-12-08T04:26:41,848 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741890_1066 (size=304886)
2024-12-08T04:26:42,246 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:26:42,247 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:26:42,680 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0001_000001 (auth:SIMPLE) from 127.0.0.1:49208
2024-12-08T04:26:43,636 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName
2024-12-08T04:26:43,636 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer
2024-12-08T04:26:51,467 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0001_000001 (auth:SIMPLE) from 127.0.0.1:47720
2024-12-08T04:26:51,799 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741891_1067 (size=350560)
2024-12-08T04:26:51,799 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741891_1067 (size=350560)
2024-12-08T04:26:51,800 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741891_1067 (size=350560)
2024-12-08T04:26:52,002 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:26:53,815 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0001_000001 (auth:SIMPLE) from 127.0.0.1:40462
2024-12-08T04:26:58,426 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:58,428 INFO  [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:26:58,447 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741892_1068 (size=8258)
2024-12-08T04:26:58,447 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741892_1068 (size=8258)
2024-12-08T04:26:58,448 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741892_1068 (size=8258)
2024-12-08T04:26:58,513 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741893_1069 (size=5356)
2024-12-08T04:26:58,514 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741893_1069 (size=5356)
2024-12-08T04:26:58,514 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741893_1069 (size=5356)
2024-12-08T04:26:58,595 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741894_1070 (size=17419)
2024-12-08T04:26:58,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741894_1070 (size=17419)
2024-12-08T04:26:58,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741894_1070 (size=17419)
2024-12-08T04:26:58,614 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741895_1071 (size=464)
2024-12-08T04:26:58,616 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741895_1071 (size=464)
2024-12-08T04:26:58,616 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741895_1071 (size=464)
2024-12-08T04:26:58,653 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741896_1072 (size=17419)
2024-12-08T04:26:58,654 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741896_1072 (size=17419)
2024-12-08T04:26:58,654 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741896_1072 (size=17419)
2024-12-08T04:26:58,676 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741897_1073 (size=350560)
2024-12-08T04:26:58,676 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741897_1073 (size=350560)
2024-12-08T04:26:58,676 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741897_1073 (size=350560)
2024-12-08T04:26:58,692 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0001_000001 (auth:SIMPLE) from 127.0.0.1:40474
2024-12-08T04:26:58,757 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000002/launch_container.sh]
2024-12-08T04:26:58,758 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000002/container_tokens]
2024-12-08T04:26:58,758 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000002/sysfs]
2024-12-08T04:26:59,423 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:59,424 INFO  [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:26:59,431 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-08T04:26:59,432 INFO  [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-08T04:27:00,631 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:27:00,632 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:27:00,643 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName
2024-12-08T04:27:00,643 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:27:00,644 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:27:00,644 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName
2024-12-08T04:27:00,645 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo
2024-12-08T04:27:00,645 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest
2024-12-08T04:27:00,645 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/testExportWithTargetName
2024-12-08T04:27:00,646 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/testExportWithTargetName/.snapshotinfo
2024-12-08T04:27:00,646 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733631997755/.hbase-snapshot/testExportWithTargetName/data.manifest
2024-12-08T04:27:00,658 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName
2024-12-08T04:27:00,662 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName
2024-12-08T04:27:00,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:00,672 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632020671"}]},"ts":"1733632020671"}
2024-12-08T04:27:00,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23
2024-12-08T04:27:00,674 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta
2024-12-08T04:27:00,677 INFO  [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING
2024-12-08T04:27:00,679 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}]
2024-12-08T04:27:00,685 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, UNASSIGN}]
2024-12-08T04:27:00,686 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, UNASSIGN
2024-12-08T04:27:00,687 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, UNASSIGN
2024-12-08T04:27:00,688 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=2be016cf061cf04bc5ed902016276d60, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:00,688 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=de88e9e2ac98bd5abe6e5139a280170f, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:00,689 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:00,690 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE; CloseRegionProcedure 2be016cf061cf04bc5ed902016276d60, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:27:00,691 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:00,693 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE; CloseRegionProcedure de88e9e2ac98bd5abe6e5139a280170f, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:00,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23
2024-12-08T04:27:00,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:00,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:00,848 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:27:00,848 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:27:00,848 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:00,848 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:00,849 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing de88e9e2ac98bd5abe6e5139a280170f, disabling compactions & flushes
2024-12-08T04:27:00,849 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 2be016cf061cf04bc5ed902016276d60, disabling compactions & flushes
2024-12-08T04:27:00,850 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:27:00,850 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60. after waiting 0 ms
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f. after waiting 0 ms
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:27:00,850 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:27:00,861 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:00,864 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:00,873 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:00,874 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.
2024-12-08T04:27:00,874 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for de88e9e2ac98bd5abe6e5139a280170f:

2024-12-08T04:27:00,874 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:00,875 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.
2024-12-08T04:27:00,875 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 2be016cf061cf04bc5ed902016276d60:

2024-12-08T04:27:00,878 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:27:00,879 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=de88e9e2ac98bd5abe6e5139a280170f, regionState=CLOSED
2024-12-08T04:27:00,880 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 2be016cf061cf04bc5ed902016276d60
2024-12-08T04:27:00,882 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=2be016cf061cf04bc5ed902016276d60, regionState=CLOSED
2024-12-08T04:27:00,888 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=26
2024-12-08T04:27:00,888 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=26, state=SUCCESS; CloseRegionProcedure de88e9e2ac98bd5abe6e5139a280170f, server=428ded7e54d6,46421,1733631984115 in 191 msec
2024-12-08T04:27:00,890 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=25
2024-12-08T04:27:00,890 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=de88e9e2ac98bd5abe6e5139a280170f, UNASSIGN in 203 msec
2024-12-08T04:27:00,890 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=25, state=SUCCESS; CloseRegionProcedure 2be016cf061cf04bc5ed902016276d60, server=428ded7e54d6,41743,1733631984189 in 194 msec
2024-12-08T04:27:00,893 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24
2024-12-08T04:27:00,893 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2be016cf061cf04bc5ed902016276d60, UNASSIGN in 205 msec
2024-12-08T04:27:00,897 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23
2024-12-08T04:27:00,897 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 215 msec
2024-12-08T04:27:00,899 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632020899"}]},"ts":"1733632020899"}
2024-12-08T04:27:00,901 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta
2024-12-08T04:27:00,904 INFO  [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED
2024-12-08T04:27:00,907 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 241 msec
2024-12-08T04:27:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23
2024-12-08T04:27:00,975 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed
2024-12-08T04:27:00,980 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName
2024-12-08T04:27:00,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:00,988 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName
2024-12-08T04:27:00,990 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:00,996 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName
2024-12-08T04:27:01,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,001 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-08T04:27:01,001 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-08T04:27:01,001 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-08T04:27:01,001 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-08T04:27:01,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
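
The ZKWatcher lines above are plain ZooKeeper watch notifications (NodeDataChanged, NodeDeleted, NodeChildrenChanged) fired against the /hbase/acl znodes after the table's permissions were removed. As a hedged illustration only — this uses the generic Apache ZooKeeper client API, not HBase's ZKPermissionWatcher, and the class name AclWatchSketch is made up — registering an equivalent data watch looks roughly like this:

	import org.apache.zookeeper.ZooKeeper;

	public class AclWatchSketch {
	    public static void main(String[] args) throws Exception {
	        // Quorum address taken from the log lines above (127.0.0.1:55878); the timeout is arbitrary.
	        ZooKeeper zk = new ZooKeeper("127.0.0.1:55878", 30000, event -> {
	            // Notifications of the kinds logged above arrive in this callback.
	            System.out.println("type=" + event.getType() + " path=" + event.getPath());
	        });
	        // Set a one-shot data watch on the table's ACL znode; a real watcher re-registers
	        // the watch inside the callback after every notification.
	        zk.getData("/hbase/acl/testtb-testExportWithTargetName", true, null);
	        Thread.sleep(60_000L);   // keep the session open long enough to observe events
	        zk.close();
	    }
	}
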
2024-12-08T04:27:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-08T04:27:01,012 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60
2024-12-08T04:27:01,019 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/recovered.edits]
2024-12-08T04:27:01,021 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:27:01,026 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/recovered.edits]
2024-12-08T04:27:01,035 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61
2024-12-08T04:27:01,035 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/cf/e66bcfe7d1b6425c8cbbaa4be086b757
2024-12-08T04:27:01,042 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60/recovered.edits/9.seqid
2024-12-08T04:27:01,043 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60
2024-12-08T04:27:01,045 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f/recovered.edits/9.seqid
2024-12-08T04:27:01,046 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithTargetName/de88e9e2ac98bd5abe6e5139a280170f
2024-12-08T04:27:01,046 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions
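
The HFileArchiver lines above show the table's store files and recovered.edits being copied from the data/default/... layout to the parallel archive/data/default/... layout before the region directories are deleted. A minimal sketch — assuming a Hadoop client classpath, reusing one archived path verbatim from the log, and with the made-up class name ArchiveCheckSketch — of confirming that layout with the standard Hadoop FileSystem API:

	import java.net.URI;

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.FileSystem;
	import org.apache.hadoop.fs.Path;

	public class ArchiveCheckSketch {
	    public static void main(String[] args) throws Exception {
	        Configuration conf = new Configuration();
	        // NameNode address as it appears in the log above.
	        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:41407"), conf);
	        Path archived = new Path("/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/"
	                + "archive/data/default/testtb-testExportWithTargetName/"
	                + "2be016cf061cf04bc5ed902016276d60/cf/10f386223b5e4bd49dc659e4bde85c61");
	        // After archiving, the store file should be present under archive/ ...
	        System.out.println("archived exists: " + fs.exists(archived));
	        // ... and the original region directory under data/ should be gone (deleted above).
	        Path original = new Path("/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/"
	                + "data/default/testtb-testExportWithTargetName/2be016cf061cf04bc5ed902016276d60");
	        System.out.println("original exists: " + fs.exists(original));
	        fs.close();
	    }
	}
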
2024-12-08T04:27:01,058 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:01,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms
2024-12-08T04:27:01,079 WARN  [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta
2024-12-08T04:27:01,089 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor.
2024-12-08T04:27:01,092 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:01,092 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states.
2024-12-08T04:27:01,092 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632021092"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:01,092 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632021092"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:01,096 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:27:01,096 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2be016cf061cf04bc5ed902016276d60, NAME => 'testtb-testExportWithTargetName,,1733631994644.2be016cf061cf04bc5ed902016276d60.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => de88e9e2ac98bd5abe6e5139a280170f, NAME => 'testtb-testExportWithTargetName,1,1733631994644.de88e9e2ac98bd5abe6e5139a280170f.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:27:01,097 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted.
2024-12-08T04:27:01,097 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632021097"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:01,103 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META
2024-12-08T04:27:01,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-08T04:27:01,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-08T04:27:01,113 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 127 msec
2024-12-08T04:27:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-08T04:27:01,313 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed
2024-12-08T04:27:01,329 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName"

2024-12-08T04:27:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName
2024-12-08T04:27:01,334 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName"

2024-12-08T04:27:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName
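
The DISABLE (procId 23), DELETE (procId 29), and snapshot-deletion requests recorded above are the master-side view of ordinary HBase Admin calls. A hedged sketch of the equivalent client sequence — standard org.apache.hadoop.hbase.client.Admin API, not the test's own code, with table and snapshot names copied from the log and the made-up class name CleanupSketch:

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.hbase.HBaseConfiguration;
	import org.apache.hadoop.hbase.TableName;
	import org.apache.hadoop.hbase.client.Admin;
	import org.apache.hadoop.hbase.client.Connection;
	import org.apache.hadoop.hbase.client.ConnectionFactory;

	public class CleanupSketch {
	    public static void main(String[] args) throws Exception {
	        Configuration conf = HBaseConfiguration.create();
	        try (Connection connection = ConnectionFactory.createConnection(conf);
	             Admin admin = connection.getAdmin()) {
	            TableName table = TableName.valueOf("testtb-testExportWithTargetName");
	            // Drives DisableTableProcedure / DeleteTableProcedure on the master,
	            // as seen in the pid=23 and pid=29 entries above.
	            admin.disableTable(table);
	            admin.deleteTable(table);
	            // Matches the two SnapshotManager "Deleting snapshot" lines.
	            admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
	            admin.deleteSnapshot("snaptb0-testExportWithTargetName");
	        }
	    }
	}
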
2024-12-08T04:27:01,364 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=770 (was 718)
Potentially hanging thread: zk-permission-watcher-pool-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DeletionService #0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_OPEN_REGION-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: zk-permission-watcher-pool-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:34292 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: htable-pool-1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-6
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:60548 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-4-3
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-5
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Container metrics unregistration
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
	java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:58632 [Waiting for operation #6]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35083
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:35083 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: DeletionService #2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-8
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ContainersLauncher #0
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213)
	java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287)
	app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295)
	app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054)
	app//org.apache.hadoop.util.Shell.run(Shell.java:959)
	app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282)
	app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349)
	app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600)
	app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388)
	app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105)
	java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DeletionService #3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ContainersLauncher #2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34619
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: zk-permission-watcher-pool-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37669
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: zk-permission-watcher-pool-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: htable-pool-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2011439612_1 at /127.0.0.1:60520 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool.commonPool-worker-3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
	java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_OPEN_REGION-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:37669 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: Thread-1300
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: ContainersLauncher #1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-7
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-4-2
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 17819)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DeletionService #1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2011439612_1 at /127.0.0.1:58610 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=805 (was 779) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=544 (was 383) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4633 (was 6148)
2024-12-08T04:27:01,365 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500
2024-12-08T04:27:01,389 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=770, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=544, ProcessCount=17, AvailableMemoryMB=4631
2024-12-08T04:27:01,389 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=770 is superior to 500
2024-12-08T04:27:01,391 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:27:01,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:01,394 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:27:01,394 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:01,394 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportWithResetTtl"
 procId is: 30
2024-12-08T04:27:01,395 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:27:01,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-12-08T04:27:01,405 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741898_1074 (size=404)
2024-12-08T04:27:01,406 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741898_1074 (size=404)
2024-12-08T04:27:01,406 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741898_1074 (size=404)
2024-12-08T04:27:01,408 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4f0c309d68305294d60edeb45abcb216, NAME => 'testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:01,416 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 640fdbcf8bc742a7d599b5b4cf201f23, NAME => 'testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:01,427 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741899_1075 (size=65)
2024-12-08T04:27:01,427 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741899_1075 (size=65)
2024-12-08T04:27:01,427 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741899_1075 (size=65)
2024-12-08T04:27:01,428 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:01,428 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 4f0c309d68305294d60edeb45abcb216, disabling compactions & flushes
2024-12-08T04:27:01,428 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,428 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,429 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. after waiting 0 ms
2024-12-08T04:27:01,429 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,429 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,429 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4f0c309d68305294d60edeb45abcb216:

2024-12-08T04:27:01,433 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741900_1076 (size=65)
2024-12-08T04:27:01,434 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741900_1076 (size=65)
2024-12-08T04:27:01,434 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741900_1076 (size=65)
2024-12-08T04:27:01,436 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:01,436 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 640fdbcf8bc742a7d599b5b4cf201f23, disabling compactions & flushes
2024-12-08T04:27:01,436 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,436 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,436 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. after waiting 0 ms
2024-12-08T04:27:01,436 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,437 INFO  [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,437 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 640fdbcf8bc742a7d599b5b4cf201f23:

2024-12-08T04:27:01,439 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:27:01,439 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632021439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632021439"}]},"ts":"1733632021439"}
2024-12-08T04:27:01,439 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632021439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632021439"}]},"ts":"1733632021439"}
2024-12-08T04:27:01,442 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:27:01,443 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:27:01,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632021443"}]},"ts":"1733632021443"}
2024-12-08T04:27:01,445 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta
2024-12-08T04:27:01,449 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:27:01,451 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:27:01,451 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:27:01,451 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:27:01,451 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:27:01,451 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:27:01,451 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:27:01,451 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:27:01,451 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, ASSIGN}]
2024-12-08T04:27:01,454 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, ASSIGN
2024-12-08T04:27:01,454 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, ASSIGN
2024-12-08T04:27:01,455 INFO  [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:27:01,455 INFO  [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:27:01,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-12-08T04:27:01,606 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:27:01,606 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=640fdbcf8bc742a7d599b5b4cf201f23, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:01,606 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4f0c309d68305294d60edeb45abcb216, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:01,609 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:27:01,610 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=31, state=RUNNABLE; OpenRegionProcedure 4f0c309d68305294d60edeb45abcb216, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:01,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-12-08T04:27:01,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:01,763 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:01,763 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:27:01,766 INFO  [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:27:01,768 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,768 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 640fdbcf8bc742a7d599b5b4cf201f23, NAME => 'testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:27:01,768 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. service=AccessControlService
2024-12-08T04:27:01,769 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:01,769 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,769 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:01,769 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,770 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,771 INFO  [StoreOpener-640fdbcf8bc742a7d599b5b4cf201f23-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 640fdbcf8bc742a7d599b5b4cf201f23 
2024-12-08T04:27:01,773 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,774 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 4f0c309d68305294d60edeb45abcb216, NAME => 'testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:27:01,774 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. service=AccessControlService
2024-12-08T04:27:01,775 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:01,775 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,775 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:01,775 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,775 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,777 INFO  [StoreOpener-640fdbcf8bc742a7d599b5b4cf201f23-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 640fdbcf8bc742a7d599b5b4cf201f23 columnFamilyName cf
2024-12-08T04:27:01,777 DEBUG [StoreOpener-640fdbcf8bc742a7d599b5b4cf201f23-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:01,777 INFO  [StoreOpener-4f0c309d68305294d60edeb45abcb216-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f0c309d68305294d60edeb45abcb216 
2024-12-08T04:27:01,777 INFO  [StoreOpener-640fdbcf8bc742a7d599b5b4cf201f23-1 {}] regionserver.HStore(327): Store=640fdbcf8bc742a7d599b5b4cf201f23/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:01,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,780 INFO  [StoreOpener-4f0c309d68305294d60edeb45abcb216-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f0c309d68305294d60edeb45abcb216 columnFamilyName cf
2024-12-08T04:27:01,780 DEBUG [StoreOpener-4f0c309d68305294d60edeb45abcb216-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:01,782 INFO  [StoreOpener-4f0c309d68305294d60edeb45abcb216-1 {}] regionserver.HStore(327): Store=4f0c309d68305294d60edeb45abcb216/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:01,782 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:01,784 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,785 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:01,785 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,786 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 640fdbcf8bc742a7d599b5b4cf201f23; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64942847, jitterRate=-0.03227616846561432}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:01,788 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 640fdbcf8bc742a7d599b5b4cf201f23:

2024-12-08T04:27:01,790 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23., pid=33, masterSystemTime=1733632021763
2024-12-08T04:27:01,793 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,793 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:01,794 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=640fdbcf8bc742a7d599b5b4cf201f23, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:01,794 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:01,801 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:01,801 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32
2024-12-08T04:27:01,801 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23, server=428ded7e54d6,41743,1733631984189 in 188 msec
2024-12-08T04:27:01,801 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 4f0c309d68305294d60edeb45abcb216; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64188612, jitterRate=-0.043515145778656006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:01,802 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 4f0c309d68305294d60edeb45abcb216:

2024-12-08T04:27:01,803 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216., pid=34, masterSystemTime=1733632021763
2024-12-08T04:27:01,804 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, ASSIGN in 350 msec
2024-12-08T04:27:01,806 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,806 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:01,807 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4f0c309d68305294d60edeb45abcb216, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:01,812 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=31
2024-12-08T04:27:01,813 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=31, state=SUCCESS; OpenRegionProcedure 4f0c309d68305294d60edeb45abcb216, server=428ded7e54d6,45955,1733631983994 in 199 msec
2024-12-08T04:27:01,816 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30
2024-12-08T04:27:01,816 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, ASSIGN in 361 msec
2024-12-08T04:27:01,817 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:27:01,818 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632021817"}]},"ts":"1733632021817"}
2024-12-08T04:27:01,819 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta
2024-12-08T04:27:01,823 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:27:01,823 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA
2024-12-08T04:27:01,826 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-08T04:27:01,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:01,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:01,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:01,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:01,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:01,832 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 439 msec
2024-12-08T04:27:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-12-08T04:27:02,002 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed
2024-12-08T04:27:02,003 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms
2024-12-08T04:27:02,004 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:02,011 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states.
2024-12-08T04:27:02,011 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:02,011 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned.
2024-12-08T04:27:02,016 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }
2024-12-08T04:27:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632022016 (current time:1733632022016).
2024-12-08T04:27:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2
2024-12-08T04:27:02,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f994fd6 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@244b8951
2024-12-08T04:27:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61be3277, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:02,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:02,031 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:02,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f994fd6 to 127.0.0.1:55878
2024-12-08T04:27:02,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:02,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x230ee2da to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b77f6eb
2024-12-08T04:27:02,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b4a4a78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:02,044 DEBUG [hconnection-0x7a66be51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:02,046 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x230ee2da to 127.0.0.1:55878
2024-12-08T04:27:02,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:02,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-08T04:27:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }
2024-12-08T04:27:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35
2024-12-08T04:27:02,053 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35
2024-12-08T04:27:02,055 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:02,058 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:02,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741901_1077 (size=161)
2024-12-08T04:27:02,072 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741901_1077 (size=161)
2024-12-08T04:27:02,073 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741901_1077 (size=161)
2024-12-08T04:27:02,074 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:02,074 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23}]
2024-12-08T04:27:02,075 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,076 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35
2024-12-08T04:27:02,210 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl'
2024-12-08T04:27:02,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:02,228 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:02,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37
2024-12-08T04:27:02,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36
2024-12-08T04:27:02,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:02,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:02,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 640fdbcf8bc742a7d599b5b4cf201f23:

2024-12-08T04:27:02,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. for emptySnaptb0-testExportWithResetTtl completed.
2024-12-08T04:27:02,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 4f0c309d68305294d60edeb45abcb216:

2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. for emptySnaptb0-testExportWithResetTtl completed.
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:02,242 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741902_1078 (size=68)
2024-12-08T04:27:02,243 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741902_1078 (size=68)
2024-12-08T04:27:02,243 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741902_1078 (size=68)
2024-12-08T04:27:02,243 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741903_1079 (size=68)
2024-12-08T04:27:02,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741903_1079 (size=68)
2024-12-08T04:27:02,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741903_1079 (size=68)
2024-12-08T04:27:02,244 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:02,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37
2024-12-08T04:27:02,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:02,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36
2024-12-08T04:27:02,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=37
2024-12-08T04:27:02,245 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,245 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=36
2024-12-08T04:27:02,246 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,246 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,249 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23 in 173 msec
2024-12-08T04:27:02,253 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35
2024-12-08T04:27:02,253 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:02,253 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216 in 173 msec
2024-12-08T04:27:02,254 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:02,254 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:02,254 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:02,255 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:02,269 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741904_1080 (size=543)
2024-12-08T04:27:02,269 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741904_1080 (size=543)
2024-12-08T04:27:02,270 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741904_1080 (size=543)
2024-12-08T04:27:02,273 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:02,280 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:02,281 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:02,283 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:02,283 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35
2024-12-08T04:27:02,284 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 232 msec
2024-12-08T04:27:02,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35
2024-12-08T04:27:02,357 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed
2024-12-08T04:27:02,366 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:02,370 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:02,373 INFO  [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:02,374 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:02,381 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl
2024-12-08T04:27:02,381 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:02,381 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:02,404 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }
2024-12-08T04:27:02,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632022404 (current time:1733632022404).
2024-12-08T04:27:02,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:02,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2
2024-12-08T04:27:02,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:02,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x538e6ad6 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c55c2c0
2024-12-08T04:27:02,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66da878c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:02,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:02,425 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:02,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x538e6ad6 to 127.0.0.1:55878
2024-12-08T04:27:02,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bb7c1ac to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7223cdae
2024-12-08T04:27:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9605e76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:02,443 DEBUG [hconnection-0x65c2bc49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:02,445 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bb7c1ac to 127.0.0.1:55878
2024-12-08T04:27:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-08T04:27:02,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }
2024-12-08T04:27:02,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38
2024-12-08T04:27:02,455 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:02,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-12-08T04:27:02,457 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:02,462 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:02,484 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741905_1081 (size=156)
2024-12-08T04:27:02,484 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741905_1081 (size=156)
2024-12-08T04:27:02,485 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741905_1081 (size=156)
2024-12-08T04:27:02,488 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:02,488 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23}]
2024-12-08T04:27:02,490 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,490 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-12-08T04:27:02,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:02,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40
2024-12-08T04:27:02,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:02,645 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 640fdbcf8bc742a7d599b5b4cf201f23 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB
2024-12-08T04:27:02,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:02,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39
2024-12-08T04:27:02,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:02,648 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 4f0c309d68305294d60edeb45abcb216 1/1 column families, dataSize=199 B heapSize=688 B
2024-12-08T04:27:02,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/.tmp/cf/583d366d1a2441a59b53ea260cdee40c is 71, key is 0854f17f8ee622ab21972019b81b3b1e/cf:q/1733632022374/Put/seqid=0
2024-12-08T04:27:02,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/.tmp/cf/de5c8814dd0f47aa97ad39b0df3aace9 is 71, key is 145f704ffaad4beaf9821de6fe992e8d/cf:q/1733632022370/Put/seqid=0
2024-12-08T04:27:02,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741906_1082 (size=5288)
2024-12-08T04:27:02,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741906_1082 (size=5288)
2024-12-08T04:27:02,702 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741906_1082 (size=5288)
2024-12-08T04:27:02,702 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/.tmp/cf/583d366d1a2441a59b53ea260cdee40c
2024-12-08T04:27:02,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741907_1083 (size=8324)
2024-12-08T04:27:02,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741907_1083 (size=8324)
2024-12-08T04:27:02,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741907_1083 (size=8324)
2024-12-08T04:27:02,707 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/.tmp/cf/de5c8814dd0f47aa97ad39b0df3aace9
2024-12-08T04:27:02,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/.tmp/cf/de5c8814dd0f47aa97ad39b0df3aace9 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9
2024-12-08T04:27:02,725 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9, entries=47, sequenceid=6, filesize=8.1 K
2024-12-08T04:27:02,727 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 640fdbcf8bc742a7d599b5b4cf201f23 in 82ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:02,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 640fdbcf8bc742a7d599b5b4cf201f23:

2024-12-08T04:27:02,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. for snaptb0-testExportWithResetTtl completed.
2024-12-08T04:27:02,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.' region-info for snapshot=snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:02,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9] hfiles
2024-12-08T04:27:02,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9 for snapshot=snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/.tmp/cf/583d366d1a2441a59b53ea260cdee40c as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c
2024-12-08T04:27:02,741 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c, entries=3, sequenceid=6, filesize=5.2 K
2024-12-08T04:27:02,745 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 4f0c309d68305294d60edeb45abcb216 in 96ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 4f0c309d68305294d60edeb45abcb216:

2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. for snaptb0-testExportWithResetTtl completed.
2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.' region-info for snapshot=snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c] hfiles
2024-12-08T04:27:02,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c for snapshot=snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-12-08T04:27:02,788 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741908_1084 (size=107)
2024-12-08T04:27:02,788 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741908_1084 (size=107)
2024-12-08T04:27:02,789 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741908_1084 (size=107)
2024-12-08T04:27:02,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:02,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40
2024-12-08T04:27:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=40
2024-12-08T04:27:02,790 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,790 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:02,794 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23 in 304 msec
2024-12-08T04:27:02,818 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741909_1085 (size=107)
2024-12-08T04:27:02,818 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741909_1085 (size=107)
2024-12-08T04:27:02,818 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741909_1085 (size=107)
2024-12-08T04:27:02,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:02,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39
2024-12-08T04:27:02,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=39
2024-12-08T04:27:02,820 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,820 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:02,826 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38
2024-12-08T04:27:02,826 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:02,826 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 4f0c309d68305294d60edeb45abcb216 in 333 msec
2024-12-08T04:27:02,827 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:02,828 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:02,828 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,829 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,857 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741910_1086 (size=621)
2024-12-08T04:27:02,857 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741910_1086 (size=621)
2024-12-08T04:27:02,858 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741910_1086 (size=621)
2024-12-08T04:27:02,861 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:02,873 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:02,874 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithResetTtl
2024-12-08T04:27:02,877 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:02,877 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38
2024-12-08T04:27:02,879 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 425 msec
2024-12-08T04:27:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38
2024-12-08T04:27:03,061 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed
2024-12-08T04:27:03,063 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:27:03,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:03,067 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:27:03,067 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:03,067 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testExportWithResetTtl"
 procId is: 41
2024-12-08T04:27:03,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41
2024-12-08T04:27:03,068 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:27:03,133 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741911_1087 (size=397)
2024-12-08T04:27:03,133 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741911_1087 (size=397)
2024-12-08T04:27:03,134 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741911_1087 (size=397)
2024-12-08T04:27:03,146 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a25f91b266634acbcde6560820b83824, NAME => 'testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:03,146 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c85ad77adf8bfc87828df2608e1dd846, NAME => 'testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:03,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41
2024-12-08T04:27:03,187 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741912_1088 (size=58)
2024-12-08T04:27:03,188 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741912_1088 (size=58)
2024-12-08T04:27:03,189 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741912_1088 (size=58)
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing a25f91b266634acbcde6560820b83824, disabling compactions & flushes
2024-12-08T04:27:03,190 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824. after waiting 0 ms
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,190 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,190 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for a25f91b266634acbcde6560820b83824:

2024-12-08T04:27:03,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741913_1089 (size=58)
2024-12-08T04:27:03,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741913_1089 (size=58)
2024-12-08T04:27:03,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741913_1089 (size=58)
2024-12-08T04:27:03,206 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:03,206 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing c85ad77adf8bfc87828df2608e1dd846, disabling compactions & flushes
2024-12-08T04:27:03,206 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846. after waiting 0 ms
2024-12-08T04:27:03,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,207 INFO  [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for c85ad77adf8bfc87828df2608e1dd846:

2024-12-08T04:27:03,210 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:27:03,210 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733632023210"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632023210"}]},"ts":"1733632023210"}
2024-12-08T04:27:03,210 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733632023210"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632023210"}]},"ts":"1733632023210"}
2024-12-08T04:27:03,213 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:27:03,215 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:27:03,215 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632023215"}]},"ts":"1733632023215"}
2024-12-08T04:27:03,218 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta
2024-12-08T04:27:03,228 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:27:03,230 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:27:03,230 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:27:03,230 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:27:03,230 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:27:03,230 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:27:03,230 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:27:03,230 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:27:03,230 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, ASSIGN}]
2024-12-08T04:27:03,231 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, ASSIGN
2024-12-08T04:27:03,231 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, ASSIGN
2024-12-08T04:27:03,233 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:27:03,233 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:27:03,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41
2024-12-08T04:27:03,384 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:27:03,384 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=c85ad77adf8bfc87828df2608e1dd846, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:03,384 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=a25f91b266634acbcde6560820b83824, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:03,386 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure a25f91b266634acbcde6560820b83824, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:03,392 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=43, state=RUNNABLE; OpenRegionProcedure c85ad77adf8bfc87828df2608e1dd846, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:03,540 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:03,544 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:03,546 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,546 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => a25f91b266634acbcde6560820b83824, NAME => 'testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:27:03,546 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824. service=AccessControlService
2024-12-08T04:27:03,547 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:03,547 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,547 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:03,547 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,547 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,549 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,549 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => c85ad77adf8bfc87828df2608e1dd846, NAME => 'testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:27:03,549 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846. service=AccessControlService
2024-12-08T04:27:03,550 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:03,550 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,550 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:03,550 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,550 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,551 INFO  [StoreOpener-a25f91b266634acbcde6560820b83824-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a25f91b266634acbcde6560820b83824 
2024-12-08T04:27:03,552 INFO  [StoreOpener-c85ad77adf8bfc87828df2608e1dd846-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c85ad77adf8bfc87828df2608e1dd846 
2024-12-08T04:27:03,554 INFO  [StoreOpener-c85ad77adf8bfc87828df2608e1dd846-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c85ad77adf8bfc87828df2608e1dd846 columnFamilyName cf
2024-12-08T04:27:03,554 DEBUG [StoreOpener-c85ad77adf8bfc87828df2608e1dd846-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:03,555 INFO  [StoreOpener-a25f91b266634acbcde6560820b83824-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a25f91b266634acbcde6560820b83824 columnFamilyName cf
2024-12-08T04:27:03,555 INFO  [StoreOpener-c85ad77adf8bfc87828df2608e1dd846-1 {}] regionserver.HStore(327): Store=c85ad77adf8bfc87828df2608e1dd846/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:03,555 DEBUG [StoreOpener-a25f91b266634acbcde6560820b83824-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:03,556 INFO  [StoreOpener-a25f91b266634acbcde6560820b83824-1 {}] regionserver.HStore(327): Store=a25f91b266634acbcde6560820b83824/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:03,557 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,557 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,558 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,558 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,563 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,564 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,567 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:03,570 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened c85ad77adf8bfc87828df2608e1dd846; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63951912, jitterRate=-0.047042250633239746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:03,571 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for c85ad77adf8bfc87828df2608e1dd846:

2024-12-08T04:27:03,572 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846., pid=45, masterSystemTime=1733632023544
2024-12-08T04:27:03,576 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,576 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,578 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=c85ad77adf8bfc87828df2608e1dd846, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:03,586 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:03,587 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened a25f91b266634acbcde6560820b83824; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64557845, jitterRate=-0.038013145327568054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:03,587 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=43
2024-12-08T04:27:03,587 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for a25f91b266634acbcde6560820b83824:

2024-12-08T04:27:03,587 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=43, state=SUCCESS; OpenRegionProcedure c85ad77adf8bfc87828df2608e1dd846, server=428ded7e54d6,45955,1733631983994 in 188 msec
2024-12-08T04:27:03,588 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824., pid=44, masterSystemTime=1733632023540
2024-12-08T04:27:03,589 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, ASSIGN in 357 msec
2024-12-08T04:27:03,590 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,590 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,591 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=a25f91b266634acbcde6560820b83824, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:03,594 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42
2024-12-08T04:27:03,595 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure a25f91b266634acbcde6560820b83824, server=428ded7e54d6,46421,1733631984115 in 206 msec
2024-12-08T04:27:03,596 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41
2024-12-08T04:27:03,596 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, ASSIGN in 364 msec
2024-12-08T04:27:03,597 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:27:03,598 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632023597"}]},"ts":"1733632023597"}
2024-12-08T04:27:03,599 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta
2024-12-08T04:27:03,602 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:27:03,602 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA
2024-12-08T04:27:03,605 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-08T04:27:03,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:03,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:03,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:03,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:03,611 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 545 msec
2024-12-08T04:27:03,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl
2024-12-08T04:27:03,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer
2024-12-08T04:27:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl
2024-12-08T04:27:03,634 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer
2024-12-08T04:27:03,634 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName
2024-12-08T04:27:03,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41
2024-12-08T04:27:03,673 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed
2024-12-08T04:27:03,673 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms
2024-12-08T04:27:03,673 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:03,677 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states.
2024-12-08T04:27:03,677 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:03,678 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned.
2024-12-08T04:27:03,692 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:03,694 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:03,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl
2024-12-08T04:27:03,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,702 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:03,721 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }
2024-12-08T04:27:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632023721 (current time:1733632023721).
2024-12-08T04:27:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2
2024-12-08T04:27:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:03,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x071d283b to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55fee77e
2024-12-08T04:27:03,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fcacdf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:03,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:03,729 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x071d283b to 127.0.0.1:55878
2024-12-08T04:27:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:03,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d2fa1ce to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fc87a02
2024-12-08T04:27:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f4a779, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:03,745 DEBUG [hconnection-0x3d69d6f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:03,746 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d2fa1ce to 127.0.0.1:55878
2024-12-08T04:27:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:03,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-08T04:27:03,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }
2024-12-08T04:27:03,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46
2024-12-08T04:27:03,755 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:03,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46
2024-12-08T04:27:03,756 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:03,759 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:03,786 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741914_1090 (size=143)
2024-12-08T04:27:03,787 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741914_1090 (size=143)
2024-12-08T04:27:03,788 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741914_1090 (size=143)
2024-12-08T04:27:03,790 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:03,790 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure a25f91b266634acbcde6560820b83824}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure c85ad77adf8bfc87828df2608e1dd846}]
2024-12-08T04:27:03,792 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:03,792 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure a25f91b266634acbcde6560820b83824
2024-12-08T04:27:03,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46
2024-12-08T04:27:03,945 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:03,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:03,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48
2024-12-08T04:27:03,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47
2024-12-08T04:27:03,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:03,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:03,947 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing a25f91b266634acbcde6560820b83824 1/1 column families, dataSize=400 B heapSize=1.09 KB
2024-12-08T04:27:03,947 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing c85ad77adf8bfc87828df2608e1dd846 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB
2024-12-08T04:27:03,973 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/.tmp/cf/551b1e0c80f8429dbbec99a69128592b is 71, key is 0150bd9db4358fcca31ef5178cadca89/cf:q/1733632023692/Put/seqid=0
2024-12-08T04:27:03,973 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/.tmp/cf/0bea5e92ce1e40cbb0247aafe81f8c2e is 71, key is 14969a3e81f12e1e10e8eaef4a520b00/cf:q/1733632023694/Put/seqid=0
2024-12-08T04:27:04,001 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741916_1092 (size=8122)
2024-12-08T04:27:04,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741915_1091 (size=5490)
2024-12-08T04:27:04,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741916_1092 (size=8122)
2024-12-08T04:27:04,004 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741916_1092 (size=8122)
2024-12-08T04:27:04,004 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741915_1091 (size=5490)
2024-12-08T04:27:04,004 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/.tmp/cf/0bea5e92ce1e40cbb0247aafe81f8c2e
2024-12-08T04:27:04,005 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741915_1091 (size=5490)
2024-12-08T04:27:04,005 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/.tmp/cf/551b1e0c80f8429dbbec99a69128592b
2024-12-08T04:27:04,018 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/.tmp/cf/0bea5e92ce1e40cbb0247aafe81f8c2e as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e
2024-12-08T04:27:04,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/.tmp/cf/551b1e0c80f8429dbbec99a69128592b as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b
2024-12-08T04:27:04,028 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e, entries=44, sequenceid=5, filesize=7.9 K
2024-12-08T04:27:04,028 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b, entries=6, sequenceid=5, filesize=5.4 K
2024-12-08T04:27:04,029 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for c85ad77adf8bfc87828df2608e1dd846 in 82ms, sequenceid=5, compaction requested=false
2024-12-08T04:27:04,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl'
2024-12-08T04:27:04,029 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for a25f91b266634acbcde6560820b83824 in 82ms, sequenceid=5, compaction requested=false
2024-12-08T04:27:04,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl'
2024-12-08T04:27:04,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for c85ad77adf8bfc87828df2608e1dd846:

2024-12-08T04:27:04,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for a25f91b266634acbcde6560820b83824:

2024-12-08T04:27:04,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846. for snaptb-testExportWithResetTtl completed.
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824. for snaptb-testExportWithResetTtl completed.
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.' region-info for snapshot=snaptb-testExportWithResetTtl
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.' region-info for snapshot=snaptb-testExportWithResetTtl
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e] hfiles
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e for snapshot=snaptb-testExportWithResetTtl
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b] hfiles
2024-12-08T04:27:04,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b for snapshot=snaptb-testExportWithResetTtl
2024-12-08T04:27:04,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46
2024-12-08T04:27:04,068 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741917_1093 (size=100)
2024-12-08T04:27:04,068 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741917_1093 (size=100)
2024-12-08T04:27:04,070 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741917_1093 (size=100)
2024-12-08T04:27:04,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:04,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47
2024-12-08T04:27:04,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=47
2024-12-08T04:27:04,074 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region a25f91b266634acbcde6560820b83824
2024-12-08T04:27:04,075 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure a25f91b266634acbcde6560820b83824
2024-12-08T04:27:04,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741918_1094 (size=100)
2024-12-08T04:27:04,080 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741918_1094 (size=100)
2024-12-08T04:27:04,080 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741918_1094 (size=100)
2024-12-08T04:27:04,080 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure a25f91b266634acbcde6560820b83824 in 287 msec
2024-12-08T04:27:04,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:04,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-12-08T04:27:04,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-12-08T04:27:04,082 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:04,082 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:04,086 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46
2024-12-08T04:27:04,086 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure c85ad77adf8bfc87828df2608e1dd846 in 293 msec
2024-12-08T04:27:04,086 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:04,087 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:04,088 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:04,088 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl
2024-12-08T04:27:04,089 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl
2024-12-08T04:27:04,161 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741919_1095 (size=600)
2024-12-08T04:27:04,164 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741919_1095 (size=600)
2024-12-08T04:27:04,166 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741919_1095 (size=600)
2024-12-08T04:27:04,171 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:04,187 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:04,187 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-08T04:27:04,190 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:04,190 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46
2024-12-08T04:27:04,196 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 438 msec
2024-12-08T04:27:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46
2024-12-08T04:27:04,361 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed
2024-12-08T04:27:04,374 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374
2024-12-08T04:27:04,374 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:04,427 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:04,427 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl
2024-12-08T04:27:04,431 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:27:04,441 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl
2024-12-08T04:27:04,469 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741920_1096 (size=143)
2024-12-08T04:27:04,469 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741920_1096 (size=143)
2024-12-08T04:27:04,470 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741920_1096 (size=143)
2024-12-08T04:27:04,482 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741921_1097 (size=600)
2024-12-08T04:27:04,483 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741921_1097 (size=600)
2024-12-08T04:27:04,483 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741921_1097 (size=600)
2024-12-08T04:27:04,518 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741922_1098 (size=141)
2024-12-08T04:27:04,519 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741922_1098 (size=141)
2024-12-08T04:27:04,519 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741922_1098 (size=141)
2024-12-08T04:27:04,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:04,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:04,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:04,523 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:04,811 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000001/launch_container.sh]
2024-12-08T04:27:04,811 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000001/container_tokens]
2024-12-08T04:27:04,811 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0001/container_1733631992429_0001_01_000001/sysfs]
2024-12-08T04:27:04,814 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0001_000001 (auth:SIMPLE) from 127.0.0.1:59734
2024-12-08T04:27:05,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-14489759986591659533.jar
2024-12-08T04:27:05,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-10257805298597215396.jar
2024-12-08T04:27:05,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:05,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:27:05,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:27:05,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:27:05,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:27:05,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:27:05,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:27:05,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:27:05,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:27:05,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:27:05,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:27:05,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:27:05,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:27:05,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:05,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:05,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:05,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:05,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:05,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:05,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:05,805 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741923_1099 (size=127628)
2024-12-08T04:27:05,806 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741923_1099 (size=127628)
2024-12-08T04:27:05,806 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741923_1099 (size=127628)
2024-12-08T04:27:05,827 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741924_1100 (size=2172101)
2024-12-08T04:27:05,827 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741924_1100 (size=2172101)
2024-12-08T04:27:05,828 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741924_1100 (size=2172101)
2024-12-08T04:27:05,850 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741925_1101 (size=213228)
2024-12-08T04:27:05,851 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741925_1101 (size=213228)
2024-12-08T04:27:05,852 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741925_1101 (size=213228)
2024-12-08T04:27:05,886 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741926_1102 (size=1877034)
2024-12-08T04:27:05,887 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741926_1102 (size=1877034)
2024-12-08T04:27:05,888 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741926_1102 (size=1877034)
2024-12-08T04:27:05,918 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741927_1103 (size=533455)
2024-12-08T04:27:05,919 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741927_1103 (size=533455)
2024-12-08T04:27:05,919 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741927_1103 (size=533455)
2024-12-08T04:27:05,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741928_1104 (size=7280644)
2024-12-08T04:27:05,962 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741928_1104 (size=7280644)
2024-12-08T04:27:05,963 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741928_1104 (size=7280644)
2024-12-08T04:27:05,996 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741929_1105 (size=4188619)
2024-12-08T04:27:05,996 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741929_1105 (size=4188619)
2024-12-08T04:27:05,997 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741929_1105 (size=4188619)
2024-12-08T04:27:06,007 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741930_1106 (size=20406)
2024-12-08T04:27:06,008 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741930_1106 (size=20406)
2024-12-08T04:27:06,009 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741930_1106 (size=20406)
2024-12-08T04:27:06,031 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741931_1107 (size=75495)
2024-12-08T04:27:06,031 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741931_1107 (size=75495)
2024-12-08T04:27:06,031 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741931_1107 (size=75495)
2024-12-08T04:27:06,051 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741932_1108 (size=45609)
2024-12-08T04:27:06,052 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741932_1108 (size=45609)
2024-12-08T04:27:06,052 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741932_1108 (size=45609)
2024-12-08T04:27:06,066 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741933_1109 (size=110084)
2024-12-08T04:27:06,066 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741933_1109 (size=110084)
2024-12-08T04:27:06,067 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741933_1109 (size=110084)
2024-12-08T04:27:06,086 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741934_1110 (size=1323991)
2024-12-08T04:27:06,086 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741934_1110 (size=1323991)
2024-12-08T04:27:06,088 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741934_1110 (size=1323991)
2024-12-08T04:27:06,098 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741935_1111 (size=23076)
2024-12-08T04:27:06,098 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741935_1111 (size=23076)
2024-12-08T04:27:06,098 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741935_1111 (size=23076)
2024-12-08T04:27:06,132 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741936_1112 (size=6350155)
2024-12-08T04:27:06,132 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741936_1112 (size=6350155)
2024-12-08T04:27:06,134 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741936_1112 (size=6350155)
2024-12-08T04:27:06,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741937_1113 (size=126803)
2024-12-08T04:27:06,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741937_1113 (size=126803)
2024-12-08T04:27:06,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741937_1113 (size=126803)
2024-12-08T04:27:06,179 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741938_1114 (size=322274)
2024-12-08T04:27:06,179 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741938_1114 (size=322274)
2024-12-08T04:27:06,181 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741938_1114 (size=322274)
2024-12-08T04:27:06,200 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741939_1115 (size=1832290)
2024-12-08T04:27:06,200 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741939_1115 (size=1832290)
2024-12-08T04:27:06,200 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741939_1115 (size=1832290)
2024-12-08T04:27:06,218 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741940_1116 (size=30081)
2024-12-08T04:27:06,218 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741940_1116 (size=30081)
2024-12-08T04:27:06,218 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741940_1116 (size=30081)
2024-12-08T04:27:06,229 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741941_1117 (size=53616)
2024-12-08T04:27:06,229 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741941_1117 (size=53616)
2024-12-08T04:27:06,230 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741941_1117 (size=53616)
2024-12-08T04:27:06,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741942_1118 (size=29229)
2024-12-08T04:27:06,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741942_1118 (size=29229)
2024-12-08T04:27:06,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741942_1118 (size=29229)
2024-12-08T04:27:06,255 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741943_1119 (size=169089)
2024-12-08T04:27:06,255 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741943_1119 (size=169089)
2024-12-08T04:27:06,256 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741943_1119 (size=169089)
2024-12-08T04:27:06,265 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741944_1120 (size=451756)
2024-12-08T04:27:06,266 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741944_1120 (size=451756)
2024-12-08T04:27:06,266 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741944_1120 (size=451756)
2024-12-08T04:27:06,315 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741945_1121 (size=5175431)
2024-12-08T04:27:06,316 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741945_1121 (size=5175431)
2024-12-08T04:27:06,317 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741945_1121 (size=5175431)
2024-12-08T04:27:06,378 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741946_1122 (size=136454)
2024-12-08T04:27:06,380 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741946_1122 (size=136454)
2024-12-08T04:27:06,380 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741946_1122 (size=136454)
2024-12-08T04:27:06,414 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741947_1123 (size=907852)
2024-12-08T04:27:06,416 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741947_1123 (size=907852)
2024-12-08T04:27:06,420 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741947_1123 (size=907852)
2024-12-08T04:27:06,424 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:27:06,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741948_1124 (size=3317408)
2024-12-08T04:27:06,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741948_1124 (size=3317408)
2024-12-08T04:27:06,528 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741948_1124 (size=3317408)
2024-12-08T04:27:06,562 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741949_1125 (size=503880)
2024-12-08T04:27:06,562 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741949_1125 (size=503880)
2024-12-08T04:27:06,562 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741949_1125 (size=503880)
2024-12-08T04:27:06,623 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741950_1126 (size=4695811)
2024-12-08T04:27:06,623 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741950_1126 (size=4695811)
2024-12-08T04:27:06,623 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741950_1126 (size=4695811)
2024-12-08T04:27:06,626 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:27:06,630 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list
2024-12-08T04:27:06,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:27:06,670 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741951_1127 (size=324)
2024-12-08T04:27:06,671 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741951_1127 (size=324)
2024-12-08T04:27:06,671 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741951_1127 (size=324)
2024-12-08T04:27:07,080 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741952_1128 (size=15)
2024-12-08T04:27:07,081 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741952_1128 (size=15)
2024-12-08T04:27:07,081 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741952_1128 (size=15)
2024-12-08T04:27:07,111 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741953_1129 (size=304881)
2024-12-08T04:27:07,112 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741953_1129 (size=304881)
2024-12-08T04:27:07,113 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741953_1129 (size=304881)
2024-12-08T04:27:07,142 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:07,142 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:07,803 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0002_000001 (auth:SIMPLE) from 127.0.0.1:59748
2024-12-08T04:27:09,031 INFO  [master/428ded7e54d6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-08T04:27:09,031 INFO  [master/428ded7e54d6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-08T04:27:15,409 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0002_000001 (auth:SIMPLE) from 127.0.0.1:46686
2024-12-08T04:27:15,883 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741954_1130 (size=350555)
2024-12-08T04:27:15,884 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741954_1130 (size=350555)
2024-12-08T04:27:15,885 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741954_1130 (size=350555)
2024-12-08T04:27:17,709 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0002_000001 (auth:SIMPLE) from 127.0.0.1:49856
2024-12-08T04:27:21,433 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741955_1131 (size=8122)
2024-12-08T04:27:21,433 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741955_1131 (size=8122)
2024-12-08T04:27:21,433 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741955_1131 (size=8122)
2024-12-08T04:27:21,516 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741956_1132 (size=5490)
2024-12-08T04:27:21,517 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741956_1132 (size=5490)
2024-12-08T04:27:21,518 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741956_1132 (size=5490)
2024-12-08T04:27:21,714 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741957_1133 (size=17398)
2024-12-08T04:27:21,720 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741957_1133 (size=17398)
2024-12-08T04:27:21,732 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741957_1133 (size=17398)
2024-12-08T04:27:21,852 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741958_1134 (size=461)
2024-12-08T04:27:21,852 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741958_1134 (size=461)
2024-12-08T04:27:21,852 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741958_1134 (size=461)
2024-12-08T04:27:21,879 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000002/launch_container.sh]
2024-12-08T04:27:21,879 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000002/container_tokens]
2024-12-08T04:27:21,879 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000002/sysfs]
2024-12-08T04:27:22,002 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:27:22,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741959_1135 (size=17398)
2024-12-08T04:27:22,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741959_1135 (size=17398)
2024-12-08T04:27:22,305 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741959_1135 (size=17398)
2024-12-08T04:27:22,353 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741960_1136 (size=350555)
2024-12-08T04:27:22,353 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741960_1136 (size=350555)
2024-12-08T04:27:22,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741960_1136 (size=350555)
2024-12-08T04:27:22,373 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0002_000001 (auth:SIMPLE) from 127.0.0.1:49860
2024-12-08T04:27:24,308 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:27:24,309 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:27:24,317 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl
2024-12-08T04:27:24,317 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:27:24,318 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:27:24,318 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-08T04:27:24,318 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-08T04:27:24,318 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-08T04:27:24,318 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-08T04:27:24,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-08T04:27:24,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632024374/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-08T04:27:24,326 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl
2024-12-08T04:27:24,327 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl
2024-12-08T04:27:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T04:27:24,335 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632044335"}]},"ts":"1733632044335"}
2024-12-08T04:27:24,337 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta
2024-12-08T04:27:24,340 INFO  [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING
2024-12-08T04:27:24,341 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}]
2024-12-08T04:27:24,343 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, UNASSIGN}]
2024-12-08T04:27:24,344 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, UNASSIGN
2024-12-08T04:27:24,345 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, UNASSIGN
2024-12-08T04:27:24,346 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=c85ad77adf8bfc87828df2608e1dd846, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:24,346 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=a25f91b266634acbcde6560820b83824, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:24,348 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:24,349 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=51, state=RUNNABLE; CloseRegionProcedure a25f91b266634acbcde6560820b83824, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:24,350 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:24,350 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=52, state=RUNNABLE; CloseRegionProcedure c85ad77adf8bfc87828df2608e1dd846, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:24,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T04:27:24,501 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:24,502 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close a25f91b266634acbcde6560820b83824
2024-12-08T04:27:24,502 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:24,502 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing a25f91b266634acbcde6560820b83824, disabling compactions & flushes
2024-12-08T04:27:24,502 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:24,502 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:24,502 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824. after waiting 0 ms
2024-12-08T04:27:24,502 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:24,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:24,505 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:24,505 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:24,505 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing c85ad77adf8bfc87828df2608e1dd846, disabling compactions & flushes
2024-12-08T04:27:24,505 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:24,505 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:24,505 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846. after waiting 0 ms
2024-12-08T04:27:24,505 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:24,512 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:27:24,513 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:27:24,513 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:24,513 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:24,513 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.
2024-12-08T04:27:24,513 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.
2024-12-08T04:27:24,514 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for a25f91b266634acbcde6560820b83824:
2024-12-08T04:27:24,514 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for c85ad77adf8bfc87828df2608e1dd846:
2024-12-08T04:27:24,515 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed a25f91b266634acbcde6560820b83824
2024-12-08T04:27:24,516 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=a25f91b266634acbcde6560820b83824, regionState=CLOSED
2024-12-08T04:27:24,517 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:24,517 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=c85ad77adf8bfc87828df2608e1dd846, regionState=CLOSED
2024-12-08T04:27:24,520 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=51
2024-12-08T04:27:24,523 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=52
2024-12-08T04:27:24,523 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=52, state=SUCCESS; CloseRegionProcedure c85ad77adf8bfc87828df2608e1dd846, server=428ded7e54d6,45955,1733631983994 in 169 msec
2024-12-08T04:27:24,525 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=a25f91b266634acbcde6560820b83824, UNASSIGN in 177 msec
2024-12-08T04:27:24,525 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=51, state=SUCCESS; CloseRegionProcedure a25f91b266634acbcde6560820b83824, server=428ded7e54d6,46421,1733631984115 in 170 msec
2024-12-08T04:27:24,527 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50
2024-12-08T04:27:24,528 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=c85ad77adf8bfc87828df2608e1dd846, UNASSIGN in 180 msec
2024-12-08T04:27:24,531 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-12-08T04:27:24,531 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 187 msec
2024-12-08T04:27:24,532 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632044532"}]},"ts":"1733632044532"}
2024-12-08T04:27:24,534 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta
2024-12-08T04:27:24,536 INFO  [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED
2024-12-08T04:27:24,538 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 209 msec
2024-12-08T04:27:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-08T04:27:24,636 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed
2024-12-08T04:27:24,637 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl
2024-12-08T04:27:24,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,639 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl
2024-12-08T04:27:24,640 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,642 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl
2024-12-08T04:27:24,646 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:24,646 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824
2024-12-08T04:27:24,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF
2024-12-08T04:27:24,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF
2024-12-08T04:27:24,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF
2024-12-08T04:27:24,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF
2024-12-08T04:27:24,651 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/recovered.edits]
2024-12-08T04:27:24,651 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/recovered.edits]
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl
2024-12-08T04:27:24,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:24,657 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-12-08T04:27:24,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:24,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:24,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:24,659 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/cf/0bea5e92ce1e40cbb0247aafe81f8c2e
2024-12-08T04:27:24,659 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/cf/551b1e0c80f8429dbbec99a69128592b
2024-12-08T04:27:24,663 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/recovered.edits/8.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846/recovered.edits/8.seqid
2024-12-08T04:27:24,664 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/recovered.edits/8.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824/recovered.edits/8.seqid
2024-12-08T04:27:24,664 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/c85ad77adf8bfc87828df2608e1dd846
2024-12-08T04:27:24,665 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportWithResetTtl/a25f91b266634acbcde6560820b83824
2024-12-08T04:27:24,665 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions
2024-12-08T04:27:24,667 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,671 WARN  [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta
2024-12-08T04:27:24,673 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor.
2024-12-08T04:27:24,674 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,674 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states.
2024-12-08T04:27:24,675 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632044674"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:24,675 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632044674"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:24,677 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:27:24,677 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a25f91b266634acbcde6560820b83824, NAME => 'testExportWithResetTtl,,1733632023063.a25f91b266634acbcde6560820b83824.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c85ad77adf8bfc87828df2608e1dd846, NAME => 'testExportWithResetTtl,1,1733632023063.c85ad77adf8bfc87828df2608e1dd846.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:27:24,677 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted.
2024-12-08T04:27:24,677 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632044677"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:24,679 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META
2024-12-08T04:27:24,681 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl
2024-12-08T04:27:24,682 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 44 msec
2024-12-08T04:27:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-12-08T04:27:24,759 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed
2024-12-08T04:27:24,760 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl
2024-12-08T04:27:24,760 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl
2024-12-08T04:27:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:24,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56
2024-12-08T04:27:24,764 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632044764"}]},"ts":"1733632044764"}
2024-12-08T04:27:24,766 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta
2024-12-08T04:27:24,768 INFO  [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING
2024-12-08T04:27:24,768 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}]
2024-12-08T04:27:24,770 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, UNASSIGN}]
2024-12-08T04:27:24,771 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, UNASSIGN
2024-12-08T04:27:24,771 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, UNASSIGN
2024-12-08T04:27:24,772 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=640fdbcf8bc742a7d599b5b4cf201f23, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:24,772 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4f0c309d68305294d60edeb45abcb216, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:24,773 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:24,773 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:27:24,774 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:24,774 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure 4f0c309d68305294d60edeb45abcb216, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:24,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56
2024-12-08T04:27:24,925 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:24,926 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:24,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:24,926 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:24,926 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing 640fdbcf8bc742a7d599b5b4cf201f23, disabling compactions & flushes
2024-12-08T04:27:24,926 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23. after waiting 0 ms
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:24,927 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 4f0c309d68305294d60edeb45abcb216, disabling compactions & flushes
2024-12-08T04:27:24,927 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216. after waiting 0 ms
2024-12-08T04:27:24,927 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:24,934 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:24,934 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:24,935 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:24,935 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.
2024-12-08T04:27:24,935 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:24,935 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 4f0c309d68305294d60edeb45abcb216:

2024-12-08T04:27:24,935 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.
2024-12-08T04:27:24,935 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 640fdbcf8bc742a7d599b5b4cf201f23:

2024-12-08T04:27:24,937 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:24,938 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4f0c309d68305294d60edeb45abcb216, regionState=CLOSED
2024-12-08T04:27:24,938 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:24,938 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=640fdbcf8bc742a7d599b5b4cf201f23, regionState=CLOSED
2024-12-08T04:27:24,942 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58
2024-12-08T04:27:24,942 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure 4f0c309d68305294d60edeb45abcb216, server=428ded7e54d6,45955,1733631983994 in 166 msec
2024-12-08T04:27:24,944 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-12-08T04:27:24,944 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure 640fdbcf8bc742a7d599b5b4cf201f23, server=428ded7e54d6,41743,1733631984189 in 168 msec
2024-12-08T04:27:24,945 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4f0c309d68305294d60edeb45abcb216, UNASSIGN in 172 msec
2024-12-08T04:27:24,946 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=57
2024-12-08T04:27:24,946 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=640fdbcf8bc742a7d599b5b4cf201f23, UNASSIGN in 174 msec
2024-12-08T04:27:24,949 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56
2024-12-08T04:27:24,949 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 179 msec
2024-12-08T04:27:24,950 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632044950"}]},"ts":"1733632044950"}
2024-12-08T04:27:24,952 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta
2024-12-08T04:27:24,954 INFO  [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED
2024-12-08T04:27:24,956 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 194 msec
2024-12-08T04:27:25,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56
2024-12-08T04:27:25,067 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed
2024-12-08T04:27:25,067 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl
2024-12-08T04:27:25,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,070 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl
2024-12-08T04:27:25,071 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,072 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl
2024-12-08T04:27:25,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF
2024-12-08T04:27:25,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF
2024-12-08T04:27:25,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF
2024-12-08T04:27:25,077 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF
2024-12-08T04:27:25,077 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:25,077 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl
2024-12-08T04:27:25,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,081 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/recovered.edits]
2024-12-08T04:27:25,081 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/recovered.edits]
2024-12-08T04:27:25,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62
2024-12-08T04:27:25,086 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/cf/583d366d1a2441a59b53ea260cdee40c
2024-12-08T04:27:25,086 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/cf/de5c8814dd0f47aa97ad39b0df3aace9
2024-12-08T04:27:25,089 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216/recovered.edits/9.seqid
2024-12-08T04:27:25,090 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23/recovered.edits/9.seqid
2024-12-08T04:27:25,090 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/4f0c309d68305294d60edeb45abcb216
2024-12-08T04:27:25,091 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithResetTtl/640fdbcf8bc742a7d599b5b4cf201f23
2024-12-08T04:27:25,091 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions
2024-12-08T04:27:25,098 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,101 WARN  [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta
2024-12-08T04:27:25,104 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor.
2024-12-08T04:27:25,105 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,105 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states.
2024-12-08T04:27:25,105 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632045105"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:25,105 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632045105"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:25,107 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:27:25,108 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4f0c309d68305294d60edeb45abcb216, NAME => 'testtb-testExportWithResetTtl,,1733632021390.4f0c309d68305294d60edeb45abcb216.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 640fdbcf8bc742a7d599b5b4cf201f23, NAME => 'testtb-testExportWithResetTtl,1,1733632021390.640fdbcf8bc742a7d599b5b4cf201f23.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:27:25,108 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted.
2024-12-08T04:27:25,108 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632045108"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:25,110 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META
2024-12-08T04:27:25,112 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl
2024-12-08T04:27:25,113 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 45 msec
2024-12-08T04:27:25,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62
2024-12-08T04:27:25,183 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed
2024-12-08T04:27:25,196 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl"

2024-12-08T04:27:25,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl
2024-12-08T04:27:25,201 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl"

2024-12-08T04:27:25,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl
2024-12-08T04:27:25,205 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl"

2024-12-08T04:27:25,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl
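
The three "delete name: ..." requests above are the test cleaning up its snapshots; each request is served by SnapshotManager.deleteSnapshot on the master. A hedged client-side sketch of the equivalent calls, assuming an open Admin handle obtained as in the previous sketch (the wrapper class and method are illustrative, the snapshot names are taken from the log):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

public class DeleteSnapshotsExample {
  // Deletes the snapshots created for this test; `admin` is assumed to be open.
  static void deleteTestSnapshots(Admin admin) throws IOException {
    String[] snapshots = {
        "emptySnaptb0-testExportWithResetTtl",
        "snaptb-testExportWithResetTtl",
        "snaptb0-testExportWithResetTtl"
    };
    for (String name : snapshots) {
      admin.deleteSnapshot(name);  // one master RPC per snapshot, as logged above
    }
  }
}
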
2024-12-08T04:27:25,238 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=784 (was 770)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46241
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LogDeleter #0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:46241 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-11
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1428193526_1 at /127.0.0.1:44600 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-6
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-2
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-3
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-14
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-2017
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: HFileArchiver-5
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1428193526_1 at /127.0.0.1:43568 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-16
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-12
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:44060 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-4
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-13
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:49124 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-15
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 20804)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:33806 [Waiting for operation #6]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=807 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=550 (was 544) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=4185 (was 4631)
2024-12-08T04:27:25,238 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500
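
The "Potentially hanging thread" listings above appear to come from the test ResourceChecker, which compares the live-thread count before and after each test and prints a stack trace for threads that survived. A generic JDK-only sketch of producing a similar listing; this is illustrative and not the actual org.apache.hadoop.hbase.ResourceChecker implementation.

import java.util.Map;

public class ThreadDumpExample {
  public static void main(String[] args) {
    // Snapshot every live thread and its current stack, similar in spirit to the dump above.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Live threads: " + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      System.out.println("Potentially hanging thread: " + entry.getKey().getName());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("\t" + frame);
      }
      System.out.println();
    }
  }
}
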
2024-12-08T04:27:25,256 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=784, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=550, ProcessCount=17, AvailableMemoryMB=4185
2024-12-08T04:27:25,256 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500
2024-12-08T04:27:25,258 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:27:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:25,261 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:27:25,261 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:25,261 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportFileSystemState"
 procId is: 63
2024-12-08T04:27:25,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-08T04:27:25,262 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:27:25,270 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741961_1137 (size=407)
2024-12-08T04:27:25,271 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741961_1137 (size=407)
2024-12-08T04:27:25,272 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741961_1137 (size=407)
2024-12-08T04:27:25,278 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fc5c860dddd9b21082f1e62c69d29489, NAME => 'testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:25,278 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c003ccb08dbffe882c6fdaadf708022b, NAME => 'testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:25,295 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741962_1138 (size=68)
2024-12-08T04:27:25,295 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741962_1138 (size=68)
2024-12-08T04:27:25,296 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741962_1138 (size=68)
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing fc5c860dddd9b21082f1e62c69d29489, disabling compactions & flushes
2024-12-08T04:27:25,297 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. after waiting 0 ms
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,297 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,297 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for fc5c860dddd9b21082f1e62c69d29489:

2024-12-08T04:27:25,302 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741963_1139 (size=68)
2024-12-08T04:27:25,302 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741963_1139 (size=68)
2024-12-08T04:27:25,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741963_1139 (size=68)
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing c003ccb08dbffe882c6fdaadf708022b, disabling compactions & flushes
2024-12-08T04:27:25,303 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. after waiting 0 ms
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,303 INFO  [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,303 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for c003ccb08dbffe882c6fdaadf708022b:

2024-12-08T04:27:25,305 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:27:25,305 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733632045305"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632045305"}]},"ts":"1733632045305"}
2024-12-08T04:27:25,305 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733632045305"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632045305"}]},"ts":"1733632045305"}
2024-12-08T04:27:25,308 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:27:25,309 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:27:25,310 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632045309"}]},"ts":"1733632045309"}
2024-12-08T04:27:25,312 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta
2024-12-08T04:27:25,317 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:27:25,319 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:27:25,319 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:27:25,320 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:27:25,320 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:27:25,320 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:27:25,320 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:27:25,320 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:27:25,320 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, ASSIGN}]
2024-12-08T04:27:25,321 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, ASSIGN
2024-12-08T04:27:25,322 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, ASSIGN
2024-12-08T04:27:25,323 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:27:25,323 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:27:25,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-08T04:27:25,473 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:27:25,474 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c003ccb08dbffe882c6fdaadf708022b, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:25,474 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=fc5c860dddd9b21082f1e62c69d29489, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:25,476 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure c003ccb08dbffe882c6fdaadf708022b, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:25,477 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE; OpenRegionProcedure fc5c860dddd9b21082f1e62c69d29489, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-08T04:27:25,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:25,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:25,632 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,632 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,633 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => fc5c860dddd9b21082f1e62c69d29489, NAME => 'testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:27:25,633 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => c003ccb08dbffe882c6fdaadf708022b, NAME => 'testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:27:25,633 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. service=AccessControlService
2024-12-08T04:27:25,633 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. service=AccessControlService
2024-12-08T04:27:25,633 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:25,633 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,634 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,636 INFO  [StoreOpener-fc5c860dddd9b21082f1e62c69d29489-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fc5c860dddd9b21082f1e62c69d29489 
2024-12-08T04:27:25,636 INFO  [StoreOpener-c003ccb08dbffe882c6fdaadf708022b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c003ccb08dbffe882c6fdaadf708022b 
2024-12-08T04:27:25,638 INFO  [StoreOpener-fc5c860dddd9b21082f1e62c69d29489-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fc5c860dddd9b21082f1e62c69d29489 columnFamilyName cf
2024-12-08T04:27:25,639 DEBUG [StoreOpener-fc5c860dddd9b21082f1e62c69d29489-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:25,639 INFO  [StoreOpener-c003ccb08dbffe882c6fdaadf708022b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c003ccb08dbffe882c6fdaadf708022b columnFamilyName cf
2024-12-08T04:27:25,639 DEBUG [StoreOpener-c003ccb08dbffe882c6fdaadf708022b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:25,640 INFO  [StoreOpener-fc5c860dddd9b21082f1e62c69d29489-1 {}] regionserver.HStore(327): Store=fc5c860dddd9b21082f1e62c69d29489/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:25,640 INFO  [StoreOpener-c003ccb08dbffe882c6fdaadf708022b-1 {}] regionserver.HStore(327): Store=c003ccb08dbffe882c6fdaadf708022b/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
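
The CompactionConfiguration lines above are the store-open-time rendering of the usual compaction defaults. If one wanted to change them, the corresponding configuration keys can be set in hbase-site.xml or programmatically; a hedged sketch, with the values shown being the defaults already visible in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // These keys map onto the values printed by CompactionConfiguration(181) above.
        conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 134217728L);   // minCompactSize (128 MB)
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }
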
2024-12-08T04:27:25,641 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,641 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,641 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,641 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,645 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,645 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:25,656 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:25,656 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:25,657 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened c003ccb08dbffe882c6fdaadf708022b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60358396, jitterRate=-0.1005898118019104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:25,657 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened fc5c860dddd9b21082f1e62c69d29489; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72262743, jitterRate=0.07679878175258636}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:25,658 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for c003ccb08dbffe882c6fdaadf708022b:

2024-12-08T04:27:25,658 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for fc5c860dddd9b21082f1e62c69d29489:

2024-12-08T04:27:25,659 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b., pid=66, masterSystemTime=1733632045628
2024-12-08T04:27:25,659 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489., pid=67, masterSystemTime=1733632045628
2024-12-08T04:27:25,660 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,660 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:25,661 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c003ccb08dbffe882c6fdaadf708022b, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:25,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,661 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:25,662 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=fc5c860dddd9b21082f1e62c69d29489, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:25,665 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65
2024-12-08T04:27:25,665 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure c003ccb08dbffe882c6fdaadf708022b, server=428ded7e54d6,45955,1733631983994 in 187 msec
2024-12-08T04:27:25,666 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=64
2024-12-08T04:27:25,666 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=64, state=SUCCESS; OpenRegionProcedure fc5c860dddd9b21082f1e62c69d29489, server=428ded7e54d6,46421,1733631984115 in 187 msec
2024-12-08T04:27:25,667 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, ASSIGN in 345 msec
2024-12-08T04:27:25,668 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63
2024-12-08T04:27:25,668 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, ASSIGN in 346 msec
2024-12-08T04:27:25,669 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:27:25,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632045669"}]},"ts":"1733632045669"}
2024-12-08T04:27:25,670 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta
2024-12-08T04:27:25,673 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:27:25,673 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA
2024-12-08T04:27:25,676 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:27:25,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:25,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:25,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:25,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:25,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:25,681 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 421 msec
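
The PermissionStorage and ZKPermissionWatcher lines above show the AccessController coprocessor granting the table creator full rights ("jenkins: RWXCA") as part of CREATE_TABLE_POST_OPERATION and distributing the new ACL entry through ZooKeeper. An equivalent explicit grant, purely as an illustration and not something the test issues itself, would look roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // family/qualifier left null => permissions apply to the whole table, matching the ACL row above.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemState"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN); // R, W, X, C, A
        }
      }
    }
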
2024-12-08T04:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-08T04:27:25,866 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed
2024-12-08T04:27:25,866 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms
2024-12-08T04:27:25,866 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:25,873 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states.
2024-12-08T04:27:25,873 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:25,873 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned.
2024-12-08T04:27:25,879 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:27:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632045879 (current time:1733632045879).
2024-12-08T04:27:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2
2024-12-08T04:27:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:25,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x594b21a4 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2a25e41e
2024-12-08T04:27:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b28fdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:25,905 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x594b21a4 to 127.0.0.1:55878
2024-12-08T04:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x467ce503 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21edfbf8
2024-12-08T04:27:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7474546d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:25,924 DEBUG [hconnection-0x24a1d19e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:25,925 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:25,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x467ce503 to 127.0.0.1:55878
2024-12-08T04:27:25,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:25,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:27:25,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:27:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68
2024-12-08T04:27:25,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68
2024-12-08T04:27:25,933 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:25,934 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:25,938 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:25,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741964_1140 (size=170)
2024-12-08T04:27:25,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741964_1140 (size=170)
2024-12-08T04:27:25,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741964_1140 (size=170)
2024-12-08T04:27:25,960 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:25,961 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b}]
2024-12-08T04:27:25,962 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:25,962 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68
2024-12-08T04:27:26,113 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:26,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:26,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70
2024-12-08T04:27:26,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for fc5c860dddd9b21082f1e62c69d29489:

2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for c003ccb08dbffe882c6fdaadf708022b:

2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. for emptySnaptb0-testExportFileSystemState completed.
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. for emptySnaptb0-testExportFileSystemState completed.
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.' region-info for snapshot=emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:26,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:26,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.' region-info for snapshot=emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:26,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:26,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:26,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:26,127 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741966_1142 (size=71)
2024-12-08T04:27:26,128 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741966_1142 (size=71)
2024-12-08T04:27:26,128 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741966_1142 (size=71)
2024-12-08T04:27:26,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:26,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69
2024-12-08T04:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=69
2024-12-08T04:27:26,130 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,131 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,133 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489 in 171 msec
2024-12-08T04:27:26,147 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741965_1141 (size=71)
2024-12-08T04:27:26,147 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741965_1141 (size=71)
2024-12-08T04:27:26,148 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741965_1141 (size=71)
2024-12-08T04:27:26,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:26,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70
2024-12-08T04:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=70
2024-12-08T04:27:26,148 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:26,149 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:26,151 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68
2024-12-08T04:27:26,151 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:26,151 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b in 189 msec
2024-12-08T04:27:26,152 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:26,153 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:26,153 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:26,154 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:26,178 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741967_1143 (size=552)
2024-12-08T04:27:26,179 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741967_1143 (size=552)
2024-12-08T04:27:26,181 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741967_1143 (size=552)
2024-12-08T04:27:26,199 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:26,210 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:26,211 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:26,213 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:26,213 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68
2024-12-08T04:27:26,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState'
2024-12-08T04:27:26,217 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 282 msec
2024-12-08T04:27:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68
2024-12-08T04:27:26,237 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed
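
The SNAPSHOT operation that just completed (procId 68) is driven from the client by a single blocking Admin call; the master-side SnapshotProcedure states above are what that call waits on. A minimal sketch, assuming an otherwise configured connection:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's SnapshotProcedure finishes (type FLUSH, as in the request above).
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }
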
2024-12-08T04:27:26,256 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:26,264 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. with WAL disabled. Data may be lost in the event of a crash.
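
The two "WAL disabled" warnings above are emitted when the client loads data with WAL writes skipped before the next snapshot is taken. A hedged client-side equivalent is sketched below; the row key, qualifier, and value are made up:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
              .setDurability(Durability.SKIP_WAL); // triggers the "Data may be lost" warning above
          table.put(put);
        }
      }
    }
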
2024-12-08T04:27:26,270 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState
2024-12-08T04:27:26,271 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:26,271 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:26,294 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:27:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632046294 (current time:1733632046294).
2024-12-08T04:27:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2
2024-12-08T04:27:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:26,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2987b9 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@372e88a3
2024-12-08T04:27:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11bbe16e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:26,306 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2987b9 to 127.0.0.1:55878
2024-12-08T04:27:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4872b299 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b5a40a4
2024-12-08T04:27:26,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45246030, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:26,324 DEBUG [hconnection-0x63e7bf7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:26,325 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4872b299 to 127.0.0.1:55878
2024-12-08T04:27:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:27:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:27:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71
2024-12-08T04:27:26,333 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:26,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-08T04:27:26,334 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:26,337 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:26,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741968_1144 (size=165)
2024-12-08T04:27:26,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741968_1144 (size=165)
2024-12-08T04:27:26,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741968_1144 (size=165)
2024-12-08T04:27:26,355 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:26,356 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b}]
2024-12-08T04:27:26,357 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:26,357 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-08T04:27:26,508 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:26,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:26,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73
2024-12-08T04:27:26,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72
2024-12-08T04:27:26,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:26,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:26,509 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing fc5c860dddd9b21082f1e62c69d29489 1/1 column families, dataSize=333 B heapSize=976 B
2024-12-08T04:27:26,509 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing c003ccb08dbffe882c6fdaadf708022b 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB
2024-12-08T04:27:26,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/.tmp/cf/d3951a12538b471084f2db635f1f009e is 71, key is 06f70c5d045ca6653200bf623d1f8d72/cf:q/1733632046256/Put/seqid=0
2024-12-08T04:27:26,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/.tmp/cf/cb9d9ec5ce624b76bf684871e676d18c is 71, key is 16d9ac490903afb0f373c397f869a875/cf:q/1733632046263/Put/seqid=0
2024-12-08T04:27:26,543 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741969_1145 (size=5422)
2024-12-08T04:27:26,543 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741969_1145 (size=5422)
2024-12-08T04:27:26,544 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741969_1145 (size=5422)
2024-12-08T04:27:26,544 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/.tmp/cf/d3951a12538b471084f2db635f1f009e
2024-12-08T04:27:26,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/.tmp/cf/d3951a12538b471084f2db635f1f009e as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e
2024-12-08T04:27:26,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741970_1146 (size=8188)
2024-12-08T04:27:26,554 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741970_1146 (size=8188)
2024-12-08T04:27:26,554 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741970_1146 (size=8188)
2024-12-08T04:27:26,556 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/.tmp/cf/cb9d9ec5ce624b76bf684871e676d18c
2024-12-08T04:27:26,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/.tmp/cf/cb9d9ec5ce624b76bf684871e676d18c as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c
2024-12-08T04:27:26,565 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e, entries=5, sequenceid=6, filesize=5.3 K
2024-12-08T04:27:26,566 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for fc5c860dddd9b21082f1e62c69d29489 in 57ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for fc5c860dddd9b21082f1e62c69d29489:

2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. for snaptb0-testExportFileSystemState completed.
2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.' region-info for snapshot=snaptb0-testExportFileSystemState
2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e] hfiles
2024-12-08T04:27:26,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e for snapshot=snaptb0-testExportFileSystemState
2024-12-08T04:27:26,573 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c, entries=45, sequenceid=6, filesize=8.0 K
2024-12-08T04:27:26,574 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for c003ccb08dbffe882c6fdaadf708022b in 65ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for c003ccb08dbffe882c6fdaadf708022b:

2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. for snaptb0-testExportFileSystemState completed.
2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.' region-info for snapshot=snaptb0-testExportFileSystemState
2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c] hfiles
2024-12-08T04:27:26,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c for snapshot=snaptb0-testExportFileSystemState
2024-12-08T04:27:26,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741971_1147 (size=110)
2024-12-08T04:27:26,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741971_1147 (size=110)
2024-12-08T04:27:26,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741971_1147 (size=110)
2024-12-08T04:27:26,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:26,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72
2024-12-08T04:27:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=72
2024-12-08T04:27:26,599 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,599 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:26,602 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure fc5c860dddd9b21082f1e62c69d29489 in 244 msec
2024-12-08T04:27:26,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-08T04:27:26,640 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741972_1148 (size=110)
2024-12-08T04:27:26,640 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741972_1148 (size=110)
2024-12-08T04:27:26,641 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741972_1148 (size=110)
2024-12-08T04:27:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73
2024-12-08T04:27:26,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=73
2024-12-08T04:27:26,642 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:26,643 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:26,647 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=71
2024-12-08T04:27:26,647 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure c003ccb08dbffe882c6fdaadf708022b in 288 msec
2024-12-08T04:27:26,647 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:26,649 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:26,650 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:26,650 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState
2024-12-08T04:27:26,651 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState
2024-12-08T04:27:26,706 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741973_1149 (size=630)
2024-12-08T04:27:26,707 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741973_1149 (size=630)
2024-12-08T04:27:26,708 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741973_1149 (size=630)
2024-12-08T04:27:26,718 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:26,730 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:26,731 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState
2024-12-08T04:27:26,733 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:26,733 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71
2024-12-08T04:27:26,735 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 402 msec
2024-12-08T04:27:26,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-08T04:27:26,938 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed
2024-12-08T04:27:26,938 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938
2024-12-08T04:27:26,939 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:26,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:26,984 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState
2024-12-08T04:27:26,987 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:27:26,992 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState
2024-12-08T04:27:27,009 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741974_1150 (size=630)
2024-12-08T04:27:27,009 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741974_1150 (size=630)
2024-12-08T04:27:27,010 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741974_1150 (size=630)
2024-12-08T04:27:27,013 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741975_1151 (size=165)
2024-12-08T04:27:27,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741975_1151 (size=165)
2024-12-08T04:27:27,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741975_1151 (size=165)
2024-12-08T04:27:27,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:27,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:27,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:27,018 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-3631199651201539021.jar
2024-12-08T04:27:28,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-17702973167150828561.jar
2024-12-08T04:27:28,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:28,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:27:28,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:27:28,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:27:28,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:27:28,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:27:28,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:27:28,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:27:28,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:27:28,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:27:28,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:27:28,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:27:28,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:27:28,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:28,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:28,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:28,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:28,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:28,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:28,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:28,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741976_1152 (size=127628)
2024-12-08T04:27:28,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741976_1152 (size=127628)
2024-12-08T04:27:28,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741976_1152 (size=127628)
2024-12-08T04:27:28,304 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ab3154ca90ccc96a74d87ae33022559e changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:27:28,304 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 092cf4729ca6e7ca2b7aa78df922ed6c changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:27:28,305 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c003ccb08dbffe882c6fdaadf708022b changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:27:28,305 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region fc5c860dddd9b21082f1e62c69d29489 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:27:28,318 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741977_1153 (size=2172101)
2024-12-08T04:27:28,318 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741977_1153 (size=2172101)
2024-12-08T04:27:28,319 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741977_1153 (size=2172101)
2024-12-08T04:27:28,327 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741978_1154 (size=213228)
2024-12-08T04:27:28,328 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741978_1154 (size=213228)
2024-12-08T04:27:28,328 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741978_1154 (size=213228)
2024-12-08T04:27:28,343 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741979_1155 (size=1877034)
2024-12-08T04:27:28,344 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741979_1155 (size=1877034)
2024-12-08T04:27:28,345 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741979_1155 (size=1877034)
2024-12-08T04:27:28,358 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741980_1156 (size=533455)
2024-12-08T04:27:28,359 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741980_1156 (size=533455)
2024-12-08T04:27:28,360 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741980_1156 (size=533455)
2024-12-08T04:27:28,401 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741981_1157 (size=7280644)
2024-12-08T04:27:28,402 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741981_1157 (size=7280644)
2024-12-08T04:27:28,403 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741981_1157 (size=7280644)
2024-12-08T04:27:28,439 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741982_1158 (size=4188619)
2024-12-08T04:27:28,440 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741982_1158 (size=4188619)
2024-12-08T04:27:28,444 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741982_1158 (size=4188619)
2024-12-08T04:27:28,463 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741983_1159 (size=20406)
2024-12-08T04:27:28,464 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741983_1159 (size=20406)
2024-12-08T04:27:28,465 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741983_1159 (size=20406)
2024-12-08T04:27:28,481 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0002_000001 (auth:SIMPLE) from 127.0.0.1:59872
2024-12-08T04:27:28,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741984_1160 (size=75495)
2024-12-08T04:27:28,496 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741984_1160 (size=75495)
2024-12-08T04:27:28,497 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741984_1160 (size=75495)
2024-12-08T04:27:28,515 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000001/launch_container.sh]
2024-12-08T04:27:28,515 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000001/container_tokens]
2024-12-08T04:27:28,515 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0002/container_1733631992429_0002_01_000001/sysfs]
2024-12-08T04:27:28,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741985_1161 (size=45609)
2024-12-08T04:27:28,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741985_1161 (size=45609)
2024-12-08T04:27:28,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741985_1161 (size=45609)
2024-12-08T04:27:28,551 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741986_1162 (size=110084)
2024-12-08T04:27:28,552 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741986_1162 (size=110084)
2024-12-08T04:27:28,552 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741986_1162 (size=110084)
2024-12-08T04:27:28,593 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741987_1163 (size=1323991)
2024-12-08T04:27:28,593 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741987_1163 (size=1323991)
2024-12-08T04:27:28,595 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741987_1163 (size=1323991)
2024-12-08T04:27:28,610 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741988_1164 (size=23076)
2024-12-08T04:27:28,610 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741988_1164 (size=23076)
2024-12-08T04:27:28,610 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741988_1164 (size=23076)
2024-12-08T04:27:28,626 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741989_1165 (size=126803)
2024-12-08T04:27:28,627 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741989_1165 (size=126803)
2024-12-08T04:27:28,627 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741989_1165 (size=126803)
2024-12-08T04:27:28,638 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741990_1166 (size=322274)
2024-12-08T04:27:28,638 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741990_1166 (size=322274)
2024-12-08T04:27:28,639 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741990_1166 (size=322274)
2024-12-08T04:27:28,655 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741991_1167 (size=1832290)
2024-12-08T04:27:28,655 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741991_1167 (size=1832290)
2024-12-08T04:27:28,656 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741991_1167 (size=1832290)
2024-12-08T04:27:28,678 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741992_1168 (size=30081)
2024-12-08T04:27:28,678 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741992_1168 (size=30081)
2024-12-08T04:27:28,679 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741992_1168 (size=30081)
2024-12-08T04:27:28,695 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741993_1169 (size=53616)
2024-12-08T04:27:28,695 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741993_1169 (size=53616)
2024-12-08T04:27:28,695 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741993_1169 (size=53616)
2024-12-08T04:27:28,729 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741994_1170 (size=29229)
2024-12-08T04:27:28,730 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741994_1170 (size=29229)
2024-12-08T04:27:28,730 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741994_1170 (size=29229)
2024-12-08T04:27:28,746 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741995_1171 (size=169089)
2024-12-08T04:27:28,747 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741995_1171 (size=169089)
2024-12-08T04:27:28,747 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741995_1171 (size=169089)
2024-12-08T04:27:28,761 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741996_1172 (size=451756)
2024-12-08T04:27:28,761 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741996_1172 (size=451756)
2024-12-08T04:27:28,762 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741996_1172 (size=451756)
2024-12-08T04:27:28,794 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741997_1173 (size=6350155)
2024-12-08T04:27:28,796 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741997_1173 (size=6350155)
2024-12-08T04:27:28,796 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741997_1173 (size=6350155)
2024-12-08T04:27:28,829 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741998_1174 (size=5175431)
2024-12-08T04:27:28,829 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741998_1174 (size=5175431)
2024-12-08T04:27:28,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741998_1174 (size=5175431)
2024-12-08T04:27:28,842 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741999_1175 (size=136454)
2024-12-08T04:27:28,842 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741999_1175 (size=136454)
2024-12-08T04:27:28,842 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741999_1175 (size=136454)
2024-12-08T04:27:28,854 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742000_1176 (size=907852)
2024-12-08T04:27:28,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742000_1176 (size=907852)
2024-12-08T04:27:28,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742000_1176 (size=907852)
2024-12-08T04:27:28,878 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742001_1177 (size=3317408)
2024-12-08T04:27:28,878 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742001_1177 (size=3317408)
2024-12-08T04:27:28,879 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742001_1177 (size=3317408)
2024-12-08T04:27:28,899 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742002_1178 (size=503880)
2024-12-08T04:27:28,900 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742002_1178 (size=503880)
2024-12-08T04:27:28,900 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742002_1178 (size=503880)
2024-12-08T04:27:28,938 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742003_1179 (size=4695811)
2024-12-08T04:27:28,938 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742003_1179 (size=4695811)
2024-12-08T04:27:28,939 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742003_1179 (size=4695811)
2024-12-08T04:27:28,941 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:27:28,944 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list
2024-12-08T04:27:28,946 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:27:28,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742004_1180 (size=344)
2024-12-08T04:27:28,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742004_1180 (size=344)
2024-12-08T04:27:28,962 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742004_1180 (size=344)
2024-12-08T04:27:28,976 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742005_1181 (size=15)
2024-12-08T04:27:28,976 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742005_1181 (size=15)
2024-12-08T04:27:28,983 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742005_1181 (size=15)
2024-12-08T04:27:29,015 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742006_1182 (size=304891)
2024-12-08T04:27:29,016 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742006_1182 (size=304891)
2024-12-08T04:27:29,016 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742006_1182 (size=304891)
2024-12-08T04:27:29,044 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:29,044 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:29,083 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0003_000001 (auth:SIMPLE) from 127.0.0.1:52310
2024-12-08T04:27:30,053 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:27:33,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState
2024-12-08T04:27:33,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer
2024-12-08T04:27:33,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl
2024-12-08T04:27:33,634 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl
2024-12-08T04:27:36,183 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0003_000001 (auth:SIMPLE) from 127.0.0.1:37434
2024-12-08T04:27:36,537 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742007_1183 (size=350565)
2024-12-08T04:27:36,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742007_1183 (size=350565)
2024-12-08T04:27:36,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742007_1183 (size=350565)
2024-12-08T04:27:38,538 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0003_000001 (auth:SIMPLE) from 127.0.0.1:59774
2024-12-08T04:27:39,136 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:27:43,288 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742008_1184 (size=8188)
2024-12-08T04:27:43,288 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742008_1184 (size=8188)
2024-12-08T04:27:43,289 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742008_1184 (size=8188)
2024-12-08T04:27:43,351 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742009_1185 (size=5422)
2024-12-08T04:27:43,352 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742009_1185 (size=5422)
2024-12-08T04:27:43,352 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742009_1185 (size=5422)
2024-12-08T04:27:43,477 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742010_1186 (size=17422)
2024-12-08T04:27:43,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742010_1186 (size=17422)
2024-12-08T04:27:43,479 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742010_1186 (size=17422)
2024-12-08T04:27:43,508 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000002/launch_container.sh]
2024-12-08T04:27:43,508 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000002/container_tokens]
2024-12-08T04:27:43,508 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000002/sysfs]
2024-12-08T04:27:43,511 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742011_1187 (size=465)
2024-12-08T04:27:43,512 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742011_1187 (size=465)
2024-12-08T04:27:43,512 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742011_1187 (size=465)
2024-12-08T04:27:43,612 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742012_1188 (size=17422)
2024-12-08T04:27:43,612 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742012_1188 (size=17422)
2024-12-08T04:27:43,613 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742012_1188 (size=17422)
2024-12-08T04:27:43,679 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742013_1189 (size=350565)
2024-12-08T04:27:43,679 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742013_1189 (size=350565)
2024-12-08T04:27:43,680 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742013_1189 (size=350565)
2024-12-08T04:27:43,696 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0003_000001 (auth:SIMPLE) from 127.0.0.1:39504
2024-12-08T04:27:45,236 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:27:45,245 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:27:45,273 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState
2024-12-08T04:27:45,273 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:27:45,274 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:27:45,274 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState
2024-12-08T04:27:45,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo
2024-12-08T04:27:45,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest
2024-12-08T04:27:45,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/snaptb0-testExportFileSystemState
2024-12-08T04:27:45,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo
2024-12-08T04:27:45,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest
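For context only (not part of the captured log): the finalize/verify sequence above corresponds to a run of the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool against hdfs://localhost:41407. A minimal sketch of an equivalent invocation follows; the snapshot name and destination root are taken from the log lines above, while the -overwrite option is an illustrative assumption, not something the logged test necessarily passed.

    // Minimal sketch, assuming the standard ExportSnapshot tool and ToolRunner.
    // Snapshot name and URIs come from the log above; other options are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            // export root used by the run in this log (export-test/export-1733632046938)
            "-copy-to", "hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632046938",
            "-overwrite"  // illustrative assumption
        });
        System.exit(rc);
      }
    }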
2024-12-08T04:27:45,283 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState
2024-12-08T04:27:45,284 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState
2024-12-08T04:27:45,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74
2024-12-08T04:27:45,287 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632065287"}]},"ts":"1733632065287"}
2024-12-08T04:27:45,294 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta
2024-12-08T04:27:45,298 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING
2024-12-08T04:27:45,299 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}]
2024-12-08T04:27:45,301 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, UNASSIGN}]
2024-12-08T04:27:45,303 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, UNASSIGN
2024-12-08T04:27:45,303 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, UNASSIGN
2024-12-08T04:27:45,304 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=fc5c860dddd9b21082f1e62c69d29489, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:45,304 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=c003ccb08dbffe882c6fdaadf708022b, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:27:45,307 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:45,307 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE; CloseRegionProcedure fc5c860dddd9b21082f1e62c69d29489, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:45,308 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:27:45,308 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=77, state=RUNNABLE; CloseRegionProcedure c003ccb08dbffe882c6fdaadf708022b, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:27:45,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74
2024-12-08T04:27:45,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:45,460 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:45,460 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:45,460 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing fc5c860dddd9b21082f1e62c69d29489, disabling compactions & flushes
2024-12-08T04:27:45,460 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:45,460 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:45,460 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489. after waiting 0 ms
2024-12-08T04:27:45,460 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:45,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:27:45,461 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:45,461 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:27:45,461 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing c003ccb08dbffe882c6fdaadf708022b, disabling compactions & flushes
2024-12-08T04:27:45,461 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:45,461 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:45,461 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b. after waiting 0 ms
2024-12-08T04:27:45,461 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:45,467 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:45,468 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:45,468 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.
2024-12-08T04:27:45,468 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for fc5c860dddd9b21082f1e62c69d29489:

2024-12-08T04:27:45,468 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:27:45,469 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:27:45,469 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.
2024-12-08T04:27:45,469 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for c003ccb08dbffe882c6fdaadf708022b:

2024-12-08T04:27:45,471 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:45,472 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=fc5c860dddd9b21082f1e62c69d29489, regionState=CLOSED
2024-12-08T04:27:45,472 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:45,472 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=c003ccb08dbffe882c6fdaadf708022b, regionState=CLOSED
2024-12-08T04:27:45,481 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=76
2024-12-08T04:27:45,481 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=77
2024-12-08T04:27:45,481 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=76, state=SUCCESS; CloseRegionProcedure fc5c860dddd9b21082f1e62c69d29489, server=428ded7e54d6,46421,1733631984115 in 166 msec
2024-12-08T04:27:45,481 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=77, state=SUCCESS; CloseRegionProcedure c003ccb08dbffe882c6fdaadf708022b, server=428ded7e54d6,45955,1733631983994 in 167 msec
2024-12-08T04:27:45,482 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=fc5c860dddd9b21082f1e62c69d29489, UNASSIGN in 180 msec
2024-12-08T04:27:45,484 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=77, resume processing ppid=75
2024-12-08T04:27:45,484 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c003ccb08dbffe882c6fdaadf708022b, UNASSIGN in 181 msec
2024-12-08T04:27:45,486 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74
2024-12-08T04:27:45,486 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 185 msec
2024-12-08T04:27:45,487 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632065487"}]},"ts":"1733632065487"}
2024-12-08T04:27:45,489 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta
2024-12-08T04:27:45,491 INFO  [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED
2024-12-08T04:27:45,493 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 208 msec
2024-12-08T04:27:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74
2024-12-08T04:27:45,590 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed
2024-12-08T04:27:45,592 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState
2024-12-08T04:27:45,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,594 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState
2024-12-08T04:27:45,595 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,597 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState
2024-12-08T04:27:45,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,601 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF
2024-12-08T04:27:45,601 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF
2024-12-08T04:27:45,601 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF
2024-12-08T04:27:45,601 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF
2024-12-08T04:27:45,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState
2024-12-08T04:27:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:45,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80
2024-12-08T04:27:45,616 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:45,617 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:45,619 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/recovered.edits]
2024-12-08T04:27:45,620 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/recovered.edits]
2024-12-08T04:27:45,625 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/cf/d3951a12538b471084f2db635f1f009e
2024-12-08T04:27:45,627 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/cf/cb9d9ec5ce624b76bf684871e676d18c
2024-12-08T04:27:45,629 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489/recovered.edits/9.seqid
2024-12-08T04:27:45,630 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/fc5c860dddd9b21082f1e62c69d29489
2024-12-08T04:27:45,633 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b/recovered.edits/9.seqid
2024-12-08T04:27:45,634 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemState/c003ccb08dbffe882c6fdaadf708022b
2024-12-08T04:27:45,634 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions
2024-12-08T04:27:45,637 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,641 WARN  [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta
2024-12-08T04:27:45,644 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor.
2024-12-08T04:27:45,645 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,645 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states.
2024-12-08T04:27:45,645 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632065645"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:45,645 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632065645"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:45,652 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:27:45,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fc5c860dddd9b21082f1e62c69d29489, NAME => 'testtb-testExportFileSystemState,,1733632045258.fc5c860dddd9b21082f1e62c69d29489.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c003ccb08dbffe882c6fdaadf708022b, NAME => 'testtb-testExportFileSystemState,1,1733632045258.c003ccb08dbffe882c6fdaadf708022b.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:27:45,652 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted.
2024-12-08T04:27:45,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632065652"}]},"ts":"9223372036854775807"}
2024-12-08T04:27:45,654 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META
2024-12-08T04:27:45,657 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState
2024-12-08T04:27:45,658 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 65 msec
2024-12-08T04:27:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80
2024-12-08T04:27:45,709 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed
2024-12-08T04:27:45,717 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState"

2024-12-08T04:27:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState
2024-12-08T04:27:45,721 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState"

2024-12-08T04:27:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState
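For context only (not part of the captured log): the disable/delete procedures above (pid=74 through pid=80) and the two snapshot deletions map onto the standard HBase 2.x Admin API. A minimal sketch under that assumption:

    // Sketch of the cleanup the procedures above correspond to, assuming the
    // standard HBase 2.x client API; connection details are illustrative.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestCleanupSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table);   // DisableTableProcedure (pid=74)
          admin.deleteTable(table);    // DeleteTableProcedure (pid=80)
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }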
2024-12-08T04:27:45,752 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=788 (was 784)
Potentially hanging thread: hconnection-0x28111a62-shared-pool-20
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:33534 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46155
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LogDeleter #1
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:45422 [Waiting for operation #5]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:40341 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1668265020_1 at /127.0.0.1:33516 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-18
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-7
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40341
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:42462 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-19
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: region-location-4
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-17
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:34619 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: HFileArchiver-8
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 23281)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1668265020_1 at /127.0.0.1:42442 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #4
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-2659
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)
 - Thread LEAK? -, OpenFileDescriptor=811 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=568 (was 550) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=3762 (was 4185)
2024-12-08T04:27:45,752 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500
2024-12-08T04:27:45,779 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=788, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=568, ProcessCount=20, AvailableMemoryMB=3759
2024-12-08T04:27:45,780 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500
2024-12-08T04:27:45,785 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:27:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:27:45,787 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:27:45,787 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:45,787 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testConsecutiveExports"
 procId is: 81
2024-12-08T04:27:45,789 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:27:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-12-08T04:27:45,801 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742014_1190 (size=404)
2024-12-08T04:27:45,802 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742014_1190 (size=404)
2024-12-08T04:27:45,802 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742014_1190 (size=404)
2024-12-08T04:27:45,804 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 58971a382e19e721d272c8877c6654ff, NAME => 'testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:45,804 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 5423631ec99d2cca9328a2c4990b9aff, NAME => 'testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:45,812 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742016_1192 (size=65)
2024-12-08T04:27:45,812 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742016_1192 (size=65)
2024-12-08T04:27:45,813 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742016_1192 (size=65)
2024-12-08T04:27:45,813 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742015_1191 (size=65)
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 58971a382e19e721d272c8877c6654ff, disabling compactions & flushes
2024-12-08T04:27:45,814 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:45,814 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742015_1191 (size=65)
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. after waiting 0 ms
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:45,814 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:45,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 58971a382e19e721d272c8877c6654ff:

2024-12-08T04:27:45,814 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742015_1191 (size=65)
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing 5423631ec99d2cca9328a2c4990b9aff, disabling compactions & flushes
2024-12-08T04:27:45,815 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. after waiting 0 ms
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:45,815 INFO  [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:45,815 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for 5423631ec99d2cca9328a2c4990b9aff:

2024-12-08T04:27:45,816 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:27:45,817 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632065817"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632065817"}]},"ts":"1733632065817"}
2024-12-08T04:27:45,817 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632065817"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632065817"}]},"ts":"1733632065817"}
2024-12-08T04:27:45,820 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:27:45,821 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:27:45,821 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632065821"}]},"ts":"1733632065821"}
2024-12-08T04:27:45,822 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta
2024-12-08T04:27:45,826 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:27:45,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:27:45,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:27:45,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:27:45,828 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:27:45,828 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:27:45,828 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:27:45,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:27:45,828 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, ASSIGN}]
2024-12-08T04:27:45,829 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, ASSIGN
2024-12-08T04:27:45,830 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, ASSIGN
2024-12-08T04:27:45,830 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:27:45,830 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:27:45,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-12-08T04:27:45,981 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:27:45,981 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=5423631ec99d2cca9328a2c4990b9aff, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:45,981 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=58971a382e19e721d272c8877c6654ff, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:45,983 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure 5423631ec99d2cca9328a2c4990b9aff, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:27:45,984 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure 58971a382e19e721d272c8877c6654ff, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:27:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-12-08T04:27:46,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:46,136 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:46,139 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,139 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 5423631ec99d2cca9328a2c4990b9aff, NAME => 'testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:27:46,140 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. service=AccessControlService
2024-12-08T04:27:46,141 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:46,141 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 58971a382e19e721d272c8877c6654ff, NAME => 'testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. service=AccessControlService
2024-12-08T04:27:46,141 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:27:46,142 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,142 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,151 INFO  [StoreOpener-5423631ec99d2cca9328a2c4990b9aff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5423631ec99d2cca9328a2c4990b9aff 
2024-12-08T04:27:46,151 INFO  [StoreOpener-58971a382e19e721d272c8877c6654ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 58971a382e19e721d272c8877c6654ff 
2024-12-08T04:27:46,153 INFO  [StoreOpener-5423631ec99d2cca9328a2c4990b9aff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5423631ec99d2cca9328a2c4990b9aff columnFamilyName cf
2024-12-08T04:27:46,154 DEBUG [StoreOpener-5423631ec99d2cca9328a2c4990b9aff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:46,154 INFO  [StoreOpener-5423631ec99d2cca9328a2c4990b9aff-1 {}] regionserver.HStore(327): Store=5423631ec99d2cca9328a2c4990b9aff/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:46,154 INFO  [StoreOpener-58971a382e19e721d272c8877c6654ff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58971a382e19e721d272c8877c6654ff columnFamilyName cf
2024-12-08T04:27:46,154 DEBUG [StoreOpener-58971a382e19e721d272c8877c6654ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:27:46,155 INFO  [StoreOpener-58971a382e19e721d272c8877c6654ff-1 {}] regionserver.HStore(327): Store=58971a382e19e721d272c8877c6654ff/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:27:46,155 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,156 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,156 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,156 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,158 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,159 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,163 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:46,164 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 5423631ec99d2cca9328a2c4990b9aff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59130104, jitterRate=-0.11889278888702393}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:46,165 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for 5423631ec99d2cca9328a2c4990b9aff:

2024-12-08T04:27:46,167 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff., pid=84, masterSystemTime=1733632066135
2024-12-08T04:27:46,168 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:27:46,169 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 58971a382e19e721d272c8877c6654ff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60299249, jitterRate=-0.10147117078304291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:27:46,169 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 58971a382e19e721d272c8877c6654ff:

2024-12-08T04:27:46,170 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff., pid=85, masterSystemTime=1733632066136
2024-12-08T04:27:46,176 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,176 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,177 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=5423631ec99d2cca9328a2c4990b9aff, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:27:46,177 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,177 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,178 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=58971a382e19e721d272c8877c6654ff, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:27:46,183 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83
2024-12-08T04:27:46,183 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure 5423631ec99d2cca9328a2c4990b9aff, server=428ded7e54d6,46421,1733631984115 in 197 msec
2024-12-08T04:27:46,187 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82
2024-12-08T04:27:46,188 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, ASSIGN in 355 msec
2024-12-08T04:27:46,188 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure 58971a382e19e721d272c8877c6654ff, server=428ded7e54d6,41743,1733631984189 in 197 msec
2024-12-08T04:27:46,189 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81
2024-12-08T04:27:46,189 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, ASSIGN in 359 msec
2024-12-08T04:27:46,190 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:27:46,190 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632066190"}]},"ts":"1733632066190"}
2024-12-08T04:27:46,192 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta
2024-12-08T04:27:46,195 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:27:46,195 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA
2024-12-08T04:27:46,198 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]
2024-12-08T04:27:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:27:46,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:46,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:46,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:46,203 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04
2024-12-08T04:27:46,204 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 417 msec
2024-12-08T04:27:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-12-08T04:27:46,396 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed
2024-12-08T04:27:46,396 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms
2024-12-08T04:27:46,396 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:46,404 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states.
2024-12-08T04:27:46,404 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:46,404 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned.
2024-12-08T04:27:46,408 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }
2024-12-08T04:27:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632066408 (current time:1733632066408).
2024-12-08T04:27:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2
2024-12-08T04:27:46,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x106db6f6 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20990ab6
2024-12-08T04:27:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d6c75d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:46,427 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x106db6f6 to 127.0.0.1:55878
2024-12-08T04:27:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b6f79a0 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d4b16b4
2024-12-08T04:27:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78a2ca6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:46,444 DEBUG [hconnection-0x36a761af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:46,449 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b6f79a0 to 127.0.0.1:55878
2024-12-08T04:27:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]
2024-12-08T04:27:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }
2024-12-08T04:27:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86
2024-12-08T04:27:46,460 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86
2024-12-08T04:27:46,462 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:46,465 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:46,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742017_1193 (size=161)
2024-12-08T04:27:46,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742017_1193 (size=161)
2024-12-08T04:27:46,487 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742017_1193 (size=161)
2024-12-08T04:27:46,490 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:46,491 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff}]
2024-12-08T04:27:46,494 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,494 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86
2024-12-08T04:27:46,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:46,645 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:46,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88
2024-12-08T04:27:46,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87
2024-12-08T04:27:46,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 5423631ec99d2cca9328a2c4990b9aff:

2024-12-08T04:27:46,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 58971a382e19e721d272c8877c6654ff:

2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. for emptySnaptb0-testConsecutiveExports completed.
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. for emptySnaptb0-testConsecutiveExports completed.
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.' region-info for snapshot=emptySnaptb0-testConsecutiveExports
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.' region-info for snapshot=emptySnaptb0-testConsecutiveExports
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:46,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:27:46,662 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742019_1195 (size=68)
2024-12-08T04:27:46,662 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742019_1195 (size=68)
2024-12-08T04:27:46,663 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742019_1195 (size=68)
2024-12-08T04:27:46,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87
2024-12-08T04:27:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=87
2024-12-08T04:27:46,664 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,664 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,666 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff in 175 msec
2024-12-08T04:27:46,674 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742018_1194 (size=68)
2024-12-08T04:27:46,674 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742018_1194 (size=68)
2024-12-08T04:27:46,674 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742018_1194 (size=68)
2024-12-08T04:27:46,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88
2024-12-08T04:27:46,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=88
2024-12-08T04:27:46,676 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,676 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,678 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=86
2024-12-08T04:27:46,679 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff in 187 msec
2024-12-08T04:27:46,679 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:46,679 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:46,680 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:46,680 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports
2024-12-08T04:27:46,681 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports
2024-12-08T04:27:46,694 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742020_1196 (size=543)
2024-12-08T04:27:46,694 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742020_1196 (size=543)
2024-12-08T04:27:46,694 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742020_1196 (size=543)
2024-12-08T04:27:46,700 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:46,705 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:46,705 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testConsecutiveExports
2024-12-08T04:27:46,707 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:46,707 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86
2024-12-08T04:27:46,708 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 249 msec
2024-12-08T04:27:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86
2024-12-08T04:27:46,764 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed
2024-12-08T04:27:46,771 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:46,772 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:27:46,775 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports
2024-12-08T04:27:46,775 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,776 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:27:46,794 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }
2024-12-08T04:27:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632066794 (current time:1733632066794).
2024-12-08T04:27:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:27:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2
2024-12-08T04:27:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:27:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d81b3d6 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30a607d0
2024-12-08T04:27:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b5d0dda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:46,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:46,804 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:46,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d81b3d6 to 127.0.0.1:55878
2024-12-08T04:27:46,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f9bf2c0 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f83f264
2024-12-08T04:27:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cb31393, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:27:46,812 DEBUG [hconnection-0x14d0f809-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:27:46,814 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f9bf2c0 to 127.0.0.1:55878
2024-12-08T04:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]
2024-12-08T04:27:46,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:27:46,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }
2024-12-08T04:27:46,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89
2024-12-08T04:27:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-12-08T04:27:46,822 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:27:46,823 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:27:46,826 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:27:46,840 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742021_1197 (size=156)
2024-12-08T04:27:46,841 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742021_1197 (size=156)
2024-12-08T04:27:46,841 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742021_1197 (size=156)
2024-12-08T04:27:46,841 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:27:46,842 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff}]
2024-12-08T04:27:46,843 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:46,843 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-12-08T04:27:46,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:27:46,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:27:46,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91
2024-12-08T04:27:46,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90
2024-12-08T04:27:46,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:46,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:46,995 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 58971a382e19e721d272c8877c6654ff 1/1 column families, dataSize=199 B heapSize=688 B
2024-12-08T04:27:46,995 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 5423631ec99d2cca9328a2c4990b9aff 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB
2024-12-08T04:27:47,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/.tmp/cf/f0c964fcbc534b83866820655a2bf386 is 71, key is 016542bcb4d725c7515759359761c85f/cf:q/1733632066771/Put/seqid=0
2024-12-08T04:27:47,020 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742022_1198 (size=5286)
2024-12-08T04:27:47,021 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742022_1198 (size=5286)
2024-12-08T04:27:47,021 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742022_1198 (size=5286)
2024-12-08T04:27:47,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/.tmp/cf/8041f23762854868a31ad57513c65c99 is 71, key is 11f3ad9d537951c54486bcb50e76aa61/cf:q/1733632066772/Put/seqid=0
2024-12-08T04:27:47,023 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/.tmp/cf/f0c964fcbc534b83866820655a2bf386
2024-12-08T04:27:47,029 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742023_1199 (size=8324)
2024-12-08T04:27:47,030 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742023_1199 (size=8324)
2024-12-08T04:27:47,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/.tmp/cf/f0c964fcbc534b83866820655a2bf386 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386
2024-12-08T04:27:47,032 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742023_1199 (size=8324)
2024-12-08T04:27:47,034 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/.tmp/cf/8041f23762854868a31ad57513c65c99
2024-12-08T04:27:47,037 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386, entries=3, sequenceid=6, filesize=5.2 K
2024-12-08T04:27:47,038 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 58971a382e19e721d272c8877c6654ff in 43ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:47,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports'
2024-12-08T04:27:47,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 58971a382e19e721d272c8877c6654ff:

2024-12-08T04:27:47,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. for snaptb0-testConsecutiveExports completed.
2024-12-08T04:27:47,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/.tmp/cf/8041f23762854868a31ad57513c65c99 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99
2024-12-08T04:27:47,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.' region-info for snapshot=snaptb0-testConsecutiveExports
2024-12-08T04:27:47,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:47,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386] hfiles
2024-12-08T04:27:47,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386 for snapshot=snaptb0-testConsecutiveExports
2024-12-08T04:27:47,045 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99, entries=47, sequenceid=6, filesize=8.1 K
2024-12-08T04:27:47,046 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5423631ec99d2cca9328a2c4990b9aff in 51ms, sequenceid=6, compaction requested=false
2024-12-08T04:27:47,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 5423631ec99d2cca9328a2c4990b9aff:

2024-12-08T04:27:47,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. for snaptb0-testConsecutiveExports completed.
2024-12-08T04:27:47,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.' region-info for snapshot=snaptb0-testConsecutiveExports
2024-12-08T04:27:47,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:27:47,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99] hfiles
2024-12-08T04:27:47,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99 for snapshot=snaptb0-testConsecutiveExports
2024-12-08T04:27:47,057 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742024_1200 (size=107)
2024-12-08T04:27:47,058 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742024_1200 (size=107)
2024-12-08T04:27:47,058 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742024_1200 (size=107)
2024-12-08T04:27:47,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:27:47,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90
2024-12-08T04:27:47,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=90
2024-12-08T04:27:47,060 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:47,060 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff
2024-12-08T04:27:47,062 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 58971a382e19e721d272c8877c6654ff in 219 msec
2024-12-08T04:27:47,068 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742025_1201 (size=107)
2024-12-08T04:27:47,068 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742025_1201 (size=107)
2024-12-08T04:27:47,069 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742025_1201 (size=107)
2024-12-08T04:27:47,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:27:47,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91
2024-12-08T04:27:47,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=91
2024-12-08T04:27:47,070 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:47,070 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:27:47,072 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=89
2024-12-08T04:27:47,072 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure 5423631ec99d2cca9328a2c4990b9aff in 228 msec
2024-12-08T04:27:47,072 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:27:47,073 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:27:47,073 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:27:47,073 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports
2024-12-08T04:27:47,074 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
2024-12-08T04:27:47,082 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742026_1202 (size=621)
2024-12-08T04:27:47,082 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742026_1202 (size=621)
2024-12-08T04:27:47,083 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742026_1202 (size=621)
2024-12-08T04:27:47,086 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:27:47,092 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:27:47,093 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-08T04:27:47,096 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:27:47,096 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89
2024-12-08T04:27:47,097 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 278 msec
2024-12-08T04:27:47,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-12-08T04:27:47,123 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed
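The lines above trace the master running SnapshotProcedure pid=89 for a FLUSH-type snapshot and the test client polling until it completes. For reference, a minimal client-side sketch of issuing the same kind of request through the public Admin API, assuming an HBase 2.x classpath and a reachable cluster configuration (only the snapshot and table names are taken from the log; everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Admin.snapshot() blocks until the master reports the snapshot procedure
          // done, which corresponds to the repeated
          // "Checking to see if procedure is done pid=89" lines in the log.
          // For an enabled table this takes the default (flush) snapshot.
          admin.snapshot("snaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }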
2024-12-08T04:27:47,123 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123
2024-12-08T04:27:47,123 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:47,168 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:27:47,168 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@66b9faa9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
2024-12-08T04:27:47,170 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:27:47,175 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
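Here ExportSnapshot verifies the source snapshot and copies its manifest into the target's .hbase-snapshot/.tmp area before the MapReduce copy of the hfiles. A sketch of driving the same tool programmatically, under the assumption that org.apache.hadoop.hbase.snapshot.ExportSnapshot implements Hadoop's Tool interface (it does in the 2.x line as far as I know) and using its documented -snapshot and -copy-to options; the local target path below is a hypothetical stand-in for the test's local-export-* directory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest plus the referenced hfiles from the source
        // cluster's root dir to the given target filesystem, as the test does here.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export-snaptb0"   // illustrative path
        });
        System.exit(rc);
      }
    }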
2024-12-08T04:27:47,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:47,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:47,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:47,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-5142159951963132000.jar
2024-12-08T04:27:48,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-15412232327307382862.jar
2024-12-08T04:27:48,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:27:48,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:27:48,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:27:48,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:27:48,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:27:48,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:27:48,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:27:48,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:27:48,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:27:48,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:27:48,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:27:48,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:27:48,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:27:48,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:48,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:48,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:27:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:27:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
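The run of TableMapReduceUtil(923) lines records the job setup resolving, for each class the export job depends on, the jar that will be shipped with the MapReduce job. In user code this is the effect of TableMapReduceUtil.addDependencyJars(job); a minimal sketch, assuming a plain Hadoop Job on an HBase client classpath (the job name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-deps");
        // Finds the containing jar for each HBase/Hadoop class the job needs
        // (the "For class ..., using jar ..." lines above) and registers those
        // jars with the job so they are distributed to the tasks.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }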
2024-12-08T04:27:48,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742027_1203 (size=127628)
2024-12-08T04:27:48,354 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742027_1203 (size=127628)
2024-12-08T04:27:48,355 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742027_1203 (size=127628)
2024-12-08T04:27:48,369 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742028_1204 (size=2172101)
2024-12-08T04:27:48,370 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742028_1204 (size=2172101)
2024-12-08T04:27:48,370 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742028_1204 (size=2172101)
2024-12-08T04:27:48,381 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742029_1205 (size=213228)
2024-12-08T04:27:48,381 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742029_1205 (size=213228)
2024-12-08T04:27:48,381 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742029_1205 (size=213228)
2024-12-08T04:27:48,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742030_1206 (size=1877034)
2024-12-08T04:27:48,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742030_1206 (size=1877034)
2024-12-08T04:27:48,395 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742030_1206 (size=1877034)
2024-12-08T04:27:48,432 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742031_1207 (size=533455)
2024-12-08T04:27:48,432 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742031_1207 (size=533455)
2024-12-08T04:27:48,432 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742031_1207 (size=533455)
2024-12-08T04:27:48,468 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742032_1208 (size=7280644)
2024-12-08T04:27:48,468 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742032_1208 (size=7280644)
2024-12-08T04:27:48,469 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742032_1208 (size=7280644)
2024-12-08T04:27:48,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742033_1209 (size=4188619)
2024-12-08T04:27:48,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742033_1209 (size=4188619)
2024-12-08T04:27:48,495 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742033_1209 (size=4188619)
2024-12-08T04:27:48,504 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742034_1210 (size=20406)
2024-12-08T04:27:48,505 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742034_1210 (size=20406)
2024-12-08T04:27:48,505 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742034_1210 (size=20406)
2024-12-08T04:27:48,517 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742035_1211 (size=75495)
2024-12-08T04:27:48,517 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742035_1211 (size=75495)
2024-12-08T04:27:48,518 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742035_1211 (size=75495)
2024-12-08T04:27:48,525 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742036_1212 (size=45609)
2024-12-08T04:27:48,525 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742036_1212 (size=45609)
2024-12-08T04:27:48,525 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742036_1212 (size=45609)
2024-12-08T04:27:48,532 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742037_1213 (size=110084)
2024-12-08T04:27:48,533 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742037_1213 (size=110084)
2024-12-08T04:27:48,533 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742037_1213 (size=110084)
2024-12-08T04:27:48,544 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742038_1214 (size=1323991)
2024-12-08T04:27:48,544 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742038_1214 (size=1323991)
2024-12-08T04:27:48,544 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742038_1214 (size=1323991)
2024-12-08T04:27:48,551 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742039_1215 (size=23076)
2024-12-08T04:27:48,552 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742039_1215 (size=23076)
2024-12-08T04:27:48,552 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742039_1215 (size=23076)
2024-12-08T04:27:48,559 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742040_1216 (size=126803)
2024-12-08T04:27:48,559 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742040_1216 (size=126803)
2024-12-08T04:27:48,560 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742040_1216 (size=126803)
2024-12-08T04:27:48,567 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742041_1217 (size=322274)
2024-12-08T04:27:48,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742041_1217 (size=322274)
2024-12-08T04:27:48,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742041_1217 (size=322274)
2024-12-08T04:27:48,585 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742042_1218 (size=451756)
2024-12-08T04:27:48,585 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742042_1218 (size=451756)
2024-12-08T04:27:48,585 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742042_1218 (size=451756)
2024-12-08T04:27:48,599 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742043_1219 (size=1832290)
2024-12-08T04:27:48,599 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742043_1219 (size=1832290)
2024-12-08T04:27:48,602 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742043_1219 (size=1832290)
2024-12-08T04:27:48,618 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742044_1220 (size=30081)
2024-12-08T04:27:48,618 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742044_1220 (size=30081)
2024-12-08T04:27:48,619 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742044_1220 (size=30081)
2024-12-08T04:27:48,635 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742045_1221 (size=53616)
2024-12-08T04:27:48,635 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742045_1221 (size=53616)
2024-12-08T04:27:48,635 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742045_1221 (size=53616)
2024-12-08T04:27:49,043 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742046_1222 (size=29229)
2024-12-08T04:27:49,044 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742046_1222 (size=29229)
2024-12-08T04:27:49,044 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742046_1222 (size=29229)
2024-12-08T04:27:49,051 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742047_1223 (size=169089)
2024-12-08T04:27:49,052 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742047_1223 (size=169089)
2024-12-08T04:27:49,052 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742047_1223 (size=169089)
2024-12-08T04:27:49,075 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742048_1224 (size=6350155)
2024-12-08T04:27:49,076 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742048_1224 (size=6350155)
2024-12-08T04:27:49,076 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742048_1224 (size=6350155)
2024-12-08T04:27:49,110 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742049_1225 (size=5175431)
2024-12-08T04:27:49,110 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742049_1225 (size=5175431)
2024-12-08T04:27:49,111 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742049_1225 (size=5175431)
2024-12-08T04:27:49,125 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742050_1226 (size=136454)
2024-12-08T04:27:49,125 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742050_1226 (size=136454)
2024-12-08T04:27:49,125 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742050_1226 (size=136454)
2024-12-08T04:27:49,139 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742051_1227 (size=907852)
2024-12-08T04:27:49,139 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742051_1227 (size=907852)
2024-12-08T04:27:49,140 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742051_1227 (size=907852)
2024-12-08T04:27:49,168 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742052_1228 (size=3317408)
2024-12-08T04:27:49,168 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742052_1228 (size=3317408)
2024-12-08T04:27:49,169 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742052_1228 (size=3317408)
2024-12-08T04:27:49,183 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742053_1229 (size=503880)
2024-12-08T04:27:49,183 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742053_1229 (size=503880)
2024-12-08T04:27:49,184 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742053_1229 (size=503880)
2024-12-08T04:27:49,224 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742054_1230 (size=4695811)
2024-12-08T04:27:49,225 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742054_1230 (size=4695811)
2024-12-08T04:27:49,225 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742054_1230 (size=4695811)
2024-12-08T04:27:49,227 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
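The JobResourceUploader warning above is Hadoop's standard message for a job submitted without a job jar, which is expected for this in-process test but in application code is usually addressed by pointing the job at a jar explicitly, for example with Job#setJarByClass. A brief sketch (the driver class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "export-snapshot-copy");
        // Ships the jar containing this driver class so task JVMs can load the
        // user classes, avoiding the "No job jar file set" warning.
        job.setJarByClass(JobJarExample.class);
      }
    }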
2024-12-08T04:27:49,229 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list
2024-12-08T04:27:49,232 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:27:49,245 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742055_1231 (size=338)
2024-12-08T04:27:49,245 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742055_1231 (size=338)
2024-12-08T04:27:49,246 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742055_1231 (size=338)
2024-12-08T04:27:49,252 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742056_1232 (size=15)
2024-12-08T04:27:49,253 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742056_1232 (size=15)
2024-12-08T04:27:49,253 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742056_1232 (size=15)
2024-12-08T04:27:49,268 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742057_1233 (size=304932)
2024-12-08T04:27:49,268 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742057_1233 (size=304932)
2024-12-08T04:27:49,269 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742057_1233 (size=304932)
2024-12-08T04:27:49,785 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:49,785 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:27:49,790 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0003_000001 (auth:SIMPLE) from 127.0.0.1:36704
2024-12-08T04:27:49,812 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000001/launch_container.sh]
2024-12-08T04:27:49,812 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000001/container_tokens]
2024-12-08T04:27:49,813 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0003/container_1733631992429_0003_01_000001/sysfs]
2024-12-08T04:27:50,480 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0004_000001 (auth:SIMPLE) from 127.0.0.1:39518
2024-12-08T04:27:51,008 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:27:52,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:27:53,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports
2024-12-08T04:27:53,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer
2024-12-08T04:27:53,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState
2024-12-08T04:27:56,866 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0004_000001 (auth:SIMPLE) from 127.0.0.1:44016
2024-12-08T04:27:57,379 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742058_1234 (size=350606)
2024-12-08T04:27:57,383 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742058_1234 (size=350606)
2024-12-08T04:27:57,383 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742058_1234 (size=350606)
2024-12-08T04:27:59,136 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:27:59,263 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0004_000001 (auth:SIMPLE) from 127.0.0.1:41838
2024-12-08T04:28:03,483 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742059_1235 (size=17447)
2024-12-08T04:28:03,483 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742059_1235 (size=17447)
2024-12-08T04:28:03,483 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742059_1235 (size=17447)
2024-12-08T04:28:03,496 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742060_1236 (size=462)
2024-12-08T04:28:03,496 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742060_1236 (size=462)
2024-12-08T04:28:03,496 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742060_1236 (size=462)
2024-12-08T04:28:03,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742061_1237 (size=17447)
2024-12-08T04:28:03,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742061_1237 (size=17447)
2024-12-08T04:28:03,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742061_1237 (size=17447)
2024-12-08T04:28:03,551 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000002/launch_container.sh]
2024-12-08T04:28:03,551 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000002/container_tokens]
2024-12-08T04:28:03,551 WARN  [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000002/sysfs]
2024-12-08T04:28:03,562 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742062_1238 (size=350606)
2024-12-08T04:28:03,562 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742062_1238 (size=350606)
2024-12-08T04:28:03,563 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742062_1238 (size=350606)
2024-12-08T04:28:03,578 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0004_000001 (auth:SIMPLE) from 127.0.0.1:41204
2024-12-08T04:28:05,539 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:28:05,540 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:28:05,546 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports
2024-12-08T04:28:05,546 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:28:05,546 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:28:05,546 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-08T04:28:05,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-08T04:28:05,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-08T04:28:05,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@66b9faa9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-08T04:28:05,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-08T04:28:05,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-08T04:28:05,561 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:05,613 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:05,613 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@66b9faa9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
2024-12-08T04:28:05,616 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:28:05,633 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
2024-12-08T04:28:05,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:05,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:05,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:05,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:06,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-5709731951314586068.jar
2024-12-08T04:28:06,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:06,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-15200818842684952633.jar
2024-12-08T04:28:07,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:07,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:28:07,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:28:07,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:28:07,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:28:07,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:28:07,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:28:07,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:28:07,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:28:07,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:28:07,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:28:07,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:28:07,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:28:07,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:07,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:07,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:07,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:07,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:07,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:07,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:07,142 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742063_1239 (size=127628)
2024-12-08T04:28:07,142 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742063_1239 (size=127628)
2024-12-08T04:28:07,142 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742063_1239 (size=127628)
2024-12-08T04:28:07,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742064_1240 (size=2172101)
2024-12-08T04:28:07,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742064_1240 (size=2172101)
2024-12-08T04:28:07,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742064_1240 (size=2172101)
2024-12-08T04:28:07,169 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742065_1241 (size=213228)
2024-12-08T04:28:07,169 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742065_1241 (size=213228)
2024-12-08T04:28:07,169 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742065_1241 (size=213228)
2024-12-08T04:28:07,185 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742066_1242 (size=1877034)
2024-12-08T04:28:07,185 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742066_1242 (size=1877034)
2024-12-08T04:28:07,186 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742066_1242 (size=1877034)
2024-12-08T04:28:07,201 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742067_1243 (size=533455)
2024-12-08T04:28:07,201 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742067_1243 (size=533455)
2024-12-08T04:28:07,202 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742067_1243 (size=533455)
2024-12-08T04:28:07,251 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742068_1244 (size=7280644)
2024-12-08T04:28:07,251 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742068_1244 (size=7280644)
2024-12-08T04:28:07,252 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742068_1244 (size=7280644)
2024-12-08T04:28:07,279 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742069_1245 (size=4188619)
2024-12-08T04:28:07,280 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742069_1245 (size=4188619)
2024-12-08T04:28:07,280 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742069_1245 (size=4188619)
2024-12-08T04:28:07,291 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742070_1246 (size=451756)
2024-12-08T04:28:07,291 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742070_1246 (size=451756)
2024-12-08T04:28:07,291 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742070_1246 (size=451756)
2024-12-08T04:28:07,299 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742071_1247 (size=20406)
2024-12-08T04:28:07,300 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742071_1247 (size=20406)
2024-12-08T04:28:07,300 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742071_1247 (size=20406)
2024-12-08T04:28:07,313 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742072_1248 (size=75495)
2024-12-08T04:28:07,313 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742072_1248 (size=75495)
2024-12-08T04:28:07,314 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742072_1248 (size=75495)
2024-12-08T04:28:07,325 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742073_1249 (size=45609)
2024-12-08T04:28:07,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742073_1249 (size=45609)
2024-12-08T04:28:07,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742073_1249 (size=45609)
2024-12-08T04:28:07,334 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742074_1250 (size=110084)
2024-12-08T04:28:07,334 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742074_1250 (size=110084)
2024-12-08T04:28:07,335 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742074_1250 (size=110084)
2024-12-08T04:28:07,358 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742075_1251 (size=1323991)
2024-12-08T04:28:07,358 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742075_1251 (size=1323991)
2024-12-08T04:28:07,358 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742075_1251 (size=1323991)
2024-12-08T04:28:07,368 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742076_1252 (size=23076)
2024-12-08T04:28:07,368 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742076_1252 (size=23076)
2024-12-08T04:28:07,368 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742076_1252 (size=23076)
2024-12-08T04:28:07,382 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742077_1253 (size=126803)
2024-12-08T04:28:07,382 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742077_1253 (size=126803)
2024-12-08T04:28:07,382 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742077_1253 (size=126803)
2024-12-08T04:28:07,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742078_1254 (size=322274)
2024-12-08T04:28:07,395 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742078_1254 (size=322274)
2024-12-08T04:28:07,395 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742078_1254 (size=322274)
2024-12-08T04:28:07,423 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742079_1255 (size=6350155)
2024-12-08T04:28:07,424 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742079_1255 (size=6350155)
2024-12-08T04:28:07,424 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742079_1255 (size=6350155)
2024-12-08T04:28:07,451 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742080_1256 (size=1832290)
2024-12-08T04:28:07,451 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742080_1256 (size=1832290)
2024-12-08T04:28:07,451 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742080_1256 (size=1832290)
2024-12-08T04:28:07,461 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742081_1257 (size=30081)
2024-12-08T04:28:07,462 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742081_1257 (size=30081)
2024-12-08T04:28:07,462 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742081_1257 (size=30081)
2024-12-08T04:28:07,476 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742082_1258 (size=53616)
2024-12-08T04:28:07,480 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742082_1258 (size=53616)
2024-12-08T04:28:07,480 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742082_1258 (size=53616)
2024-12-08T04:28:07,885 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742083_1259 (size=29229)
2024-12-08T04:28:07,886 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742083_1259 (size=29229)
2024-12-08T04:28:07,886 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742083_1259 (size=29229)
2024-12-08T04:28:07,893 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742084_1260 (size=169089)
2024-12-08T04:28:07,893 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742084_1260 (size=169089)
2024-12-08T04:28:07,893 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742084_1260 (size=169089)
2024-12-08T04:28:07,912 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742085_1261 (size=5175431)
2024-12-08T04:28:07,912 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742085_1261 (size=5175431)
2024-12-08T04:28:07,912 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742085_1261 (size=5175431)
2024-12-08T04:28:07,920 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742086_1262 (size=136454)
2024-12-08T04:28:07,920 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742086_1262 (size=136454)
2024-12-08T04:28:07,923 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742086_1262 (size=136454)
2024-12-08T04:28:07,930 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742087_1263 (size=907852)
2024-12-08T04:28:07,931 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742087_1263 (size=907852)
2024-12-08T04:28:07,931 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742087_1263 (size=907852)
2024-12-08T04:28:07,951 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742088_1264 (size=3317408)
2024-12-08T04:28:07,951 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742088_1264 (size=3317408)
2024-12-08T04:28:07,952 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742088_1264 (size=3317408)
2024-12-08T04:28:07,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742089_1265 (size=503880)
2024-12-08T04:28:07,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742089_1265 (size=503880)
2024-12-08T04:28:07,961 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742089_1265 (size=503880)
2024-12-08T04:28:07,982 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742090_1266 (size=4695811)
2024-12-08T04:28:07,983 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742090_1266 (size=4695811)
2024-12-08T04:28:07,983 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742090_1266 (size=4695811)
2024-12-08T04:28:07,984 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:28:07,987 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list
2024-12-08T04:28:07,988 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:28:07,994 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742091_1267 (size=338)
2024-12-08T04:28:07,995 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742091_1267 (size=338)
2024-12-08T04:28:07,995 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742091_1267 (size=338)
2024-12-08T04:28:08,001 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742092_1268 (size=15)
2024-12-08T04:28:08,001 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742092_1268 (size=15)
2024-12-08T04:28:08,002 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742092_1268 (size=15)
2024-12-08T04:28:08,034 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742093_1269 (size=304932)
2024-12-08T04:28:08,034 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742093_1269 (size=304932)
2024-12-08T04:28:08,034 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742093_1269 (size=304932)
2024-12-08T04:28:09,681 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:28:09,681 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:28:09,684 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0004_000001 (auth:SIMPLE) from 127.0.0.1:41208
2024-12-08T04:28:09,696 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_2/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000001/launch_container.sh]
2024-12-08T04:28:09,696 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_2/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000001/container_tokens]
2024-12-08T04:28:09,696 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_2/usercache/jenkins/appcache/application_1733631992429_0004/container_1733631992429_0004_01_000001/sysfs]
2024-12-08T04:28:09,799 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0005_000001 (auth:SIMPLE) from 127.0.0.1:59400
2024-12-08T04:28:16,153 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0005_000001 (auth:SIMPLE) from 127.0.0.1:58246
2024-12-08T04:28:16,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742094_1270 (size=350606)
2024-12-08T04:28:16,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742094_1270 (size=350606)
2024-12-08T04:28:16,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742094_1270 (size=350606)
2024-12-08T04:28:18,516 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0005_000001 (auth:SIMPLE) from 127.0.0.1:52480
2024-12-08T04:28:22,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
2024-12-08T04:28:23,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742095_1271 (size=16925)
2024-12-08T04:28:23,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742095_1271 (size=16925)
2024-12-08T04:28:23,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742095_1271 (size=16925)
2024-12-08T04:28:23,587 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000002/launch_container.sh]
2024-12-08T04:28:23,587 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000002/container_tokens]
2024-12-08T04:28:23,587 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000002/sysfs]
2024-12-08T04:28:23,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742096_1272 (size=462)
2024-12-08T04:28:23,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742096_1272 (size=462)
2024-12-08T04:28:23,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742096_1272 (size=462)
2024-12-08T04:28:23,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742097_1273 (size=16925)
2024-12-08T04:28:23,651 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742097_1273 (size=16925)
2024-12-08T04:28:23,652 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742097_1273 (size=16925)
2024-12-08T04:28:23,676 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742098_1274 (size=350606)
2024-12-08T04:28:23,676 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742098_1274 (size=350606)
2024-12-08T04:28:23,677 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742098_1274 (size=350606)
2024-12-08T04:28:23,694 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0005_000001 (auth:SIMPLE) from 127.0.0.1:50622
2024-12-08T04:28:25,260 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:28:25,260 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:28:25,264 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports
2024-12-08T04:28:25,264 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:28:25,265 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:28:25,265 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-08T04:28:25,266 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-08T04:28:25,266 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-08T04:28:25,266 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@66b9faa9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-08T04:28:25,266 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-08T04:28:25,266 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632067123/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-08T04:28:25,289 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports
2024-12-08T04:28:25,290 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports
2024-12-08T04:28:25,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92
2024-12-08T04:28:25,296 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632105296"}]},"ts":"1733632105296"}
2024-12-08T04:28:25,299 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta
2024-12-08T04:28:25,301 INFO  [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING
2024-12-08T04:28:25,302 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}]
2024-12-08T04:28:25,304 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, UNASSIGN}]
2024-12-08T04:28:25,305 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, UNASSIGN
2024-12-08T04:28:25,306 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, UNASSIGN
2024-12-08T04:28:25,307 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=58971a382e19e721d272c8877c6654ff, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:25,307 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=5423631ec99d2cca9328a2c4990b9aff, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:25,308 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:25,308 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 5423631ec99d2cca9328a2c4990b9aff, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:28:25,310 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:25,310 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure 58971a382e19e721d272c8877c6654ff, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:28:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92
2024-12-08T04:28:25,461 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:25,461 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:28:25,462 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:28:25,462 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 5423631ec99d2cca9328a2c4990b9aff, disabling compactions & flushes
2024-12-08T04:28:25,462 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:28:25,462 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:28:25,462 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff. after waiting 0 ms
2024-12-08T04:28:25,462 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:28:25,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:25,471 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 58971a382e19e721d272c8877c6654ff
2024-12-08T04:28:25,471 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:28:25,471 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 58971a382e19e721d272c8877c6654ff, disabling compactions & flushes
2024-12-08T04:28:25,471 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:28:25,471 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:28:25,471 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff. after waiting 0 ms
2024-12-08T04:28:25,471 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:28:25,480 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:28:25,481 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:25,481 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.
2024-12-08T04:28:25,481 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 5423631ec99d2cca9328a2c4990b9aff:
2024-12-08T04:28:25,484 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:28:25,484 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=5423631ec99d2cca9328a2c4990b9aff, regionState=CLOSED
2024-12-08T04:28:25,488 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95
2024-12-08T04:28:25,490 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 5423631ec99d2cca9328a2c4990b9aff, server=428ded7e54d6,46421,1733631984115 in 178 msec
2024-12-08T04:28:25,491 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5423631ec99d2cca9328a2c4990b9aff, UNASSIGN in 184 msec
2024-12-08T04:28:25,495 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:28:25,501 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:25,501 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.
2024-12-08T04:28:25,501 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 58971a382e19e721d272c8877c6654ff:

2024-12-08T04:28:25,503 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 58971a382e19e721d272c8877c6654ff
2024-12-08T04:28:25,504 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=58971a382e19e721d272c8877c6654ff, regionState=CLOSED
2024-12-08T04:28:25,509 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94
2024-12-08T04:28:25,509 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure 58971a382e19e721d272c8877c6654ff, server=428ded7e54d6,41743,1733631984189 in 195 msec
2024-12-08T04:28:25,511 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93
2024-12-08T04:28:25,511 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58971a382e19e721d272c8877c6654ff, UNASSIGN in 205 msec
2024-12-08T04:28:25,513 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92
2024-12-08T04:28:25,513 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 209 msec
2024-12-08T04:28:25,514 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632105514"}]},"ts":"1733632105514"}
2024-12-08T04:28:25,516 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta
2024-12-08T04:28:25,518 INFO  [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED
2024-12-08T04:28:25,521 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 229 msec
2024-12-08T04:28:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92
2024-12-08T04:28:25,600 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed
2024-12-08T04:28:25,600 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports
2024-12-08T04:28:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,603 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,603 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports
2024-12-08T04:28:25,608 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports
2024-12-08T04:28:25,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF
2024-12-08T04:28:25,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF
2024-12-08T04:28:25,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF
2024-12-08T04:28:25,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF
2024-12-08T04:28:25,618 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports
2024-12-08T04:28:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:25,626 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/recovered.edits]
2024-12-08T04:28:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98
2024-12-08T04:28:25,631 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/cf/f0c964fcbc534b83866820655a2bf386
2024-12-08T04:28:25,638 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff/recovered.edits/9.seqid
2024-12-08T04:28:25,639 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/58971a382e19e721d272c8877c6654ff
2024-12-08T04:28:25,640 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:28:25,643 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/recovered.edits]
2024-12-08T04:28:25,650 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/cf/8041f23762854868a31ad57513c65c99
2024-12-08T04:28:25,657 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff/recovered.edits/9.seqid
2024-12-08T04:28:25,659 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testConsecutiveExports/5423631ec99d2cca9328a2c4990b9aff
2024-12-08T04:28:25,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions
2024-12-08T04:28:25,662 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,666 WARN  [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta
2024-12-08T04:28:25,669 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor.
2024-12-08T04:28:25,670 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,670 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states.
2024-12-08T04:28:25,671 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632105670"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:25,671 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632105670"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:25,674 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:28:25,674 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 58971a382e19e721d272c8877c6654ff, NAME => 'testtb-testConsecutiveExports,,1733632065784.58971a382e19e721d272c8877c6654ff.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5423631ec99d2cca9328a2c4990b9aff, NAME => 'testtb-testConsecutiveExports,1,1733632065784.5423631ec99d2cca9328a2c4990b9aff.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:28:25,674 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted.
2024-12-08T04:28:25,674 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632105674"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:25,684 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META
2024-12-08T04:28:25,687 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports
2024-12-08T04:28:25,690 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 87 msec
2024-12-08T04:28:25,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98
2024-12-08T04:28:25,729 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed
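
[Editor's note] The pid=92 (DisableTableProcedure) and pid=98 (DeleteTableProcedure) entries above are the master-side procedures triggered by a client-side disable-then-delete of testtb-testConsecutiveExports. The following is a minimal sketch of what such a client call sequence looks like with the standard HBase 2.x Admin API; the class name and connection handling are illustrative only and are not the actual TestSecureExportSnapshot code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical sketch, not the test's own source.
public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A table must be disabled before it can be deleted; each call blocks
      // until the corresponding master procedure (DisableTableProcedure /
      // DeleteTableProcedure) reports completion, matching the
      // "Operation: DISABLE ... completed" / "Operation: DELETE ... completed"
      // lines above.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}
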
2024-12-08T04:28:25,742 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports"

2024-12-08T04:28:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports
2024-12-08T04:28:25,746 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports"

2024-12-08T04:28:25,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports
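
[Editor's note] The two SnapshotManager "Deleting snapshot" entries above remove the snapshots left behind by the export test. A minimal sketch of the equivalent client calls, assuming the standard HBase 2.x Admin API (snapshot names taken from the log; connection handling illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical sketch, not the test's own source.
public class DeleteSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each call corresponds to one "Deleting snapshot: ..." entry above.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
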
2024-12-08T04:28:25,773 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=790 (was 788)
Potentially hanging thread: HFileArchiver-10
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32775
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #7
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:58428 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #5
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:32775 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-22
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LogDeleter #2
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LogDeleter #3
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177)
	java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-23
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:59308 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool.commonPool-worker-4
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
	java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-718042934_1 at /127.0.0.1:58408 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-24
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #6
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-25
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-3892
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: HFileArchiver-9
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #8
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:46924 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 28555)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=802 (was 811), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=614 (was 568) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 20), AvailableMemoryMB=3317 (was 3759)
2024-12-08T04:28:25,773 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500
2024-12-08T04:28:25,797 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=790, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=614, ProcessCount=20, AvailableMemoryMB=3314
2024-12-08T04:28:25,797 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500
2024-12-08T04:28:25,804 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:28:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:25,806 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:28:25,806 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:25,807 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportFileSystemStateWithMergeRegion"
 procId is: 99
2024-12-08T04:28:25,807 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:28:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99
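
[Editor's note] The create request logged above (pid=99, CreateTableProcedure for testtb-testExportFileSystemStateWithMergeRegion, one 'cf' family with VERSIONS => '1', two regions split at row key '1') can be driven from the client side roughly as sketched below, assuming the standard HBase 2.x Admin and descriptor-builder API. Class name and connection handling are illustrative; this is not the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch, not the test's own source.
public class CreateExportTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)  // matches VERSIONS => '1' in the descriptor logged above
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Pre-split at row key '1' so the table starts with two regions
      // (''..'1' and '1'..''), matching the RegionOpenAndInit entries below.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
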
2024-12-08T04:28:25,829 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742099_1275 (size=422)
2024-12-08T04:28:25,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742099_1275 (size=422)
2024-12-08T04:28:25,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742099_1275 (size=422)
2024-12-08T04:28:25,835 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cbc84221475ff3a867edc4c51f6c6129, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:25,837 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 33c5526d7c6d067212022d154e4122df, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:25,857 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742100_1276 (size=83)
2024-12-08T04:28:25,858 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742100_1276 (size=83)
2024-12-08T04:28:25,858 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742100_1276 (size=83)
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing cbc84221475ff3a867edc4c51f6c6129, disabling compactions & flushes
2024-12-08T04:28:25,859 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. after waiting 0 ms
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:25,859 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:25,859 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for cbc84221475ff3a867edc4c51f6c6129:

2024-12-08T04:28:25,862 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742101_1277 (size=83)
2024-12-08T04:28:25,863 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742101_1277 (size=83)
2024-12-08T04:28:25,863 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742101_1277 (size=83)
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing 33c5526d7c6d067212022d154e4122df, disabling compactions & flushes
2024-12-08T04:28:25,864 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. after waiting 0 ms
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:25,864 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:25,864 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for 33c5526d7c6d067212022d154e4122df:

2024-12-08T04:28:25,865 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:28:25,866 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733632105865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632105865"}]},"ts":"1733632105865"}
2024-12-08T04:28:25,866 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733632105865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632105865"}]},"ts":"1733632105865"}
2024-12-08T04:28:25,869 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:28:25,870 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:28:25,870 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632105870"}]},"ts":"1733632105870"}
2024-12-08T04:28:25,872 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta
2024-12-08T04:28:25,876 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:28:25,877 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:28:25,877 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:28:25,877 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:28:25,877 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:28:25,877 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:28:25,877 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:28:25,877 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:28:25,878 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, ASSIGN}]
2024-12-08T04:28:25,879 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, ASSIGN
2024-12-08T04:28:25,879 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, ASSIGN
2024-12-08T04:28:25,880 INFO  [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:28:25,880 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:28:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99
2024-12-08T04:28:26,031 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:28:26,031 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33c5526d7c6d067212022d154e4122df, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:26,031 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=cbc84221475ff3a867edc4c51f6c6129, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:26,034 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 33c5526d7c6d067212022d154e4122df, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:28:26,036 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=100, state=RUNNABLE; OpenRegionProcedure cbc84221475ff3a867edc4c51f6c6129, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:28:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99
2024-12-08T04:28:26,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:26,190 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:26,190 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:26,190 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 33c5526d7c6d067212022d154e4122df, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:28:26,191 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. service=AccessControlService
2024-12-08T04:28:26,191 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:26,191 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,191 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:26,191 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,192 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,193 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,193 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => cbc84221475ff3a867edc4c51f6c6129, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:28:26,194 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. service=AccessControlService
2024-12-08T04:28:26,194 INFO  [StoreOpener-33c5526d7c6d067212022d154e4122df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 33c5526d7c6d067212022d154e4122df 
2024-12-08T04:28:26,194 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:26,194 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,194 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:26,194 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,194 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,197 INFO  [StoreOpener-33c5526d7c6d067212022d154e4122df-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33c5526d7c6d067212022d154e4122df columnFamilyName cf
2024-12-08T04:28:26,197 DEBUG [StoreOpener-33c5526d7c6d067212022d154e4122df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:26,200 INFO  [StoreOpener-33c5526d7c6d067212022d154e4122df-1 {}] regionserver.HStore(327): Store=33c5526d7c6d067212022d154e4122df/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:26,201 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,201 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,205 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,224 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:26,224 INFO  [StoreOpener-cbc84221475ff3a867edc4c51f6c6129-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cbc84221475ff3a867edc4c51f6c6129 
2024-12-08T04:28:26,226 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 33c5526d7c6d067212022d154e4122df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61053648, jitterRate=-0.09022974967956543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:26,227 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 33c5526d7c6d067212022d154e4122df:

2024-12-08T04:28:26,227 INFO  [StoreOpener-cbc84221475ff3a867edc4c51f6c6129-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbc84221475ff3a867edc4c51f6c6129 columnFamilyName cf
2024-12-08T04:28:26,228 DEBUG [StoreOpener-cbc84221475ff3a867edc4c51f6c6129-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:26,228 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df., pid=102, masterSystemTime=1733632106186
2024-12-08T04:28:26,228 INFO  [StoreOpener-cbc84221475ff3a867edc4c51f6c6129-1 {}] regionserver.HStore(327): Store=cbc84221475ff3a867edc4c51f6c6129/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:26,230 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,230 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,232 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=33c5526d7c6d067212022d154e4122df, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:26,232 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:26,233 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:26,235 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,239 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101
2024-12-08T04:28:26,239 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 33c5526d7c6d067212022d154e4122df, server=428ded7e54d6,46421,1733631984115 in 202 msec
2024-12-08T04:28:26,240 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:26,240 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened cbc84221475ff3a867edc4c51f6c6129; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62644201, jitterRate=-0.06652866303920746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:26,240 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, ASSIGN in 361 msec
2024-12-08T04:28:26,240 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for cbc84221475ff3a867edc4c51f6c6129:

2024-12-08T04:28:26,242 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129., pid=103, masterSystemTime=1733632106190
2024-12-08T04:28:26,245 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,245 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,245 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=cbc84221475ff3a867edc4c51f6c6129, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:26,250 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=100
2024-12-08T04:28:26,250 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=100, state=SUCCESS; OpenRegionProcedure cbc84221475ff3a867edc4c51f6c6129, server=428ded7e54d6,41743,1733631984189 in 211 msec
2024-12-08T04:28:26,253 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99
2024-12-08T04:28:26,253 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, ASSIGN in 372 msec
2024-12-08T04:28:26,254 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:28:26,254 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632106254"}]},"ts":"1733632106254"}
2024-12-08T04:28:26,256 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta
2024-12-08T04:28:26,261 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:28:26,261 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA
2024-12-08T04:28:26,263 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]
2024-12-08T04:28:26,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:26,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:26,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:26,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:26,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:26,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:26,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:26,270 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 464 msec
2024-12-08T04:28:26,270 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:26,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99
2024-12-08T04:28:26,417 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed
2024-12-08T04:28:26,418 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms
2024-12-08T04:28:26,418 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:26,425 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states.
2024-12-08T04:28:26,425 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:26,425 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned.
2024-12-08T04:28:26,433 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }
2024-12-08T04:28:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632106433 (current time:1733632106433).
2024-12-08T04:28:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:28:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2
2024-12-08T04:28:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:26,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b45c736 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19da9ccd
2024-12-08T04:28:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5392f2e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:26,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:26,461 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:26,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b45c736 to 127.0.0.1:55878
2024-12-08T04:28:26,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ec1f689 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53b03d68
2024-12-08T04:28:26,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1771f833, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:26,492 DEBUG [hconnection-0x6bc08544-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:26,494 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:26,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ec1f689 to 127.0.0.1:55878
2024-12-08T04:28:26,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:26,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]
2024-12-08T04:28:26,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }
2024-12-08T04:28:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104
2024-12-08T04:28:26,502 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:26,503 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-08T04:28:26,507 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:26,549 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742102_1278 (size=215)
2024-12-08T04:28:26,550 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742102_1278 (size=215)
2024-12-08T04:28:26,551 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742102_1278 (size=215)
2024-12-08T04:28:26,552 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:26,552 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df}]
2024-12-08T04:28:26,554 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,554 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-08T04:28:26,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:26,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105
2024-12-08T04:28:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106
2024-12-08T04:28:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for 33c5526d7c6d067212022d154e4122df:

2024-12-08T04:28:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed.
2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for cbc84221475ff3a867edc4c51f6c6129:

2024-12-08T04:28:26,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed.
2024-12-08T04:28:26,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:26,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:28:26,740 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742103_1279 (size=86)
2024-12-08T04:28:26,740 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742103_1279 (size=86)
2024-12-08T04:28:26,741 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742103_1279 (size=86)
2024-12-08T04:28:26,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:26,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106
2024-12-08T04:28:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=106
2024-12-08T04:28:26,741 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,742 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,742 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742104_1280 (size=86)
2024-12-08T04:28:26,743 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742104_1280 (size=86)
2024-12-08T04:28:26,743 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742104_1280 (size=86)
2024-12-08T04:28:26,744 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df in 191 msec
2024-12-08T04:28:26,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105
2024-12-08T04:28:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=105
2024-12-08T04:28:26,744 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,745 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,747 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104
2024-12-08T04:28:26,747 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129 in 193 msec
2024-12-08T04:28:26,747 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:26,748 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:26,748 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:26,748 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,749 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,759 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742105_1281 (size=597)
2024-12-08T04:28:26,760 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742105_1281 (size=597)
2024-12-08T04:28:26,760 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742105_1281 (size=597)
2024-12-08T04:28:26,765 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:26,770 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:26,771 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,772 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:26,772 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104
2024-12-08T04:28:26,774 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 272 msec
2024-12-08T04:28:26,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-08T04:28:26,809 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed
2024-12-08T04:28:26,818 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:28:26,819 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:28:26,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:26,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:26,824 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:26,838 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }
2024-12-08T04:28:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632106838 (current time:1733632106838).
2024-12-08T04:28:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:28:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2
2024-12-08T04:28:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:26,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3085af1c to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f00f6ed
2024-12-08T04:28:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e44a1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:26,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:26,845 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3085af1c to 127.0.0.1:55878
2024-12-08T04:28:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:26,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5938a549 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@157971d
2024-12-08T04:28:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24468188, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:26,856 DEBUG [hconnection-0x2066a606-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:26,857 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5938a549 to 127.0.0.1:55878
2024-12-08T04:28:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]
2024-12-08T04:28:26,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }
2024-12-08T04:28:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107
2024-12-08T04:28:26,863 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107
2024-12-08T04:28:26,864 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:26,866 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:26,880 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742106_1282 (size=210)
2024-12-08T04:28:26,881 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742106_1282 (size=210)
2024-12-08T04:28:26,881 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742106_1282 (size=210)
2024-12-08T04:28:26,882 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:26,883 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df}]
2024-12-08T04:28:26,884 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:26,884 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:26,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107
2024-12-08T04:28:27,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:27,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:27,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109
2024-12-08T04:28:27,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108
2024-12-08T04:28:27,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:27,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:27,036 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing cbc84221475ff3a867edc4c51f6c6129 1/1 column families, dataSize=132 B heapSize=544 B
2024-12-08T04:28:27,036 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 33c5526d7c6d067212022d154e4122df 1/1 column families, dataSize=3.13 KB heapSize=7 KB
2024-12-08T04:28:27,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/.tmp/cf/4c39cb2cab424ca98e799f628acaa1f3 is 71, key is 1999be31603f0c86420083b6d4f71f52/cf:q/1733632106819/Put/seqid=0
2024-12-08T04:28:27,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/.tmp/cf/ae52ef979ba74ae093e4ab1ac55b990d is 71, key is 01f75db305d6b22430732e5cafdf930d/cf:q/1733632106818/Put/seqid=0
2024-12-08T04:28:27,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742107_1283 (size=8394)
2024-12-08T04:28:27,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742107_1283 (size=8394)
2024-12-08T04:28:27,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742107_1283 (size=8394)
2024-12-08T04:28:27,080 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/.tmp/cf/4c39cb2cab424ca98e799f628acaa1f3
2024-12-08T04:28:27,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/.tmp/cf/4c39cb2cab424ca98e799f628acaa1f3 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3
2024-12-08T04:28:27,093 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3, entries=48, sequenceid=6, filesize=8.2 K
2024-12-08T04:28:27,096 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 33c5526d7c6d067212022d154e4122df in 59ms, sequenceid=6, compaction requested=false
2024-12-08T04:28:27,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion'
2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 33c5526d7c6d067212022d154e4122df:

2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. for snaptb0-testExportFileSystemStateWithMergeRegion completed.
2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3] hfiles
2024-12-08T04:28:27,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,105 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742108_1284 (size=5216)
2024-12-08T04:28:27,105 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742108_1284 (size=5216)
2024-12-08T04:28:27,106 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742108_1284 (size=5216)
2024-12-08T04:28:27,107 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/.tmp/cf/ae52ef979ba74ae093e4ab1ac55b990d
2024-12-08T04:28:27,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/.tmp/cf/ae52ef979ba74ae093e4ab1ac55b990d as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d
2024-12-08T04:28:27,130 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d, entries=2, sequenceid=6, filesize=5.1 K
2024-12-08T04:28:27,131 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for cbc84221475ff3a867edc4c51f6c6129 in 95ms, sequenceid=6, compaction requested=false
2024-12-08T04:28:27,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for cbc84221475ff3a867edc4c51f6c6129:

2024-12-08T04:28:27,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. for snaptb0-testExportFileSystemStateWithMergeRegion completed.
2024-12-08T04:28:27,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:27,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d] hfiles
2024-12-08T04:28:27,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,141 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742109_1285 (size=125)
2024-12-08T04:28:27,141 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742109_1285 (size=125)
2024-12-08T04:28:27,141 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742109_1285 (size=125)
2024-12-08T04:28:27,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:27,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109
2024-12-08T04:28:27,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=109
2024-12-08T04:28:27,143 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:27,144 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:27,146 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure 33c5526d7c6d067212022d154e4122df in 262 msec
2024-12-08T04:28:27,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742110_1286 (size=125)
2024-12-08T04:28:27,159 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742110_1286 (size=125)
2024-12-08T04:28:27,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107
2024-12-08T04:28:27,167 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742110_1286 (size=125)
2024-12-08T04:28:27,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:27,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108
2024-12-08T04:28:27,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=108
2024-12-08T04:28:27,168 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:27,168 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:27,170 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107
2024-12-08T04:28:27,171 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure cbc84221475ff3a867edc4c51f6c6129 in 286 msec
2024-12-08T04:28:27,171 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:27,172 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:27,173 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:27,173 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,173 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,196 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742111_1287 (size=675)
2024-12-08T04:28:27,197 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742111_1287 (size=675)
2024-12-08T04:28:27,197 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742111_1287 (size=675)
2024-12-08T04:28:27,206 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:27,213 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:27,214 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:27,215 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:27,216 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107
2024-12-08T04:28:27,220 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 355 msec
2024-12-08T04:28:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107
2024-12-08T04:28:27,467 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed
2024-12-08T04:28:27,489 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:28:27,491 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:28:27,492 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions
2024-12-08T04:28:27,493 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:28:27,495 INFO  [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50030, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:28:27,495 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions
2024-12-08T04:28:27,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-08T04:28:27,497 INFO  [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-08T04:28:27,498 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions
2024-12-08T04:28:27,500 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:28:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:27,502 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:28:27,502 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:27,502 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1"
 procId is: 110
2024-12-08T04:28:27,503 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:28:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-08T04:28:27,517 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742112_1288 (size=399)
2024-12-08T04:28:27,519 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742112_1288 (size=399)
2024-12-08T04:28:27,519 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742112_1288 (size=399)
2024-12-08T04:28:27,526 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d352c6e5c0bfcc84b67531b0900577bc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:27,526 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 14ff522260a4a9748a386abe8032a3c6, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:27,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742114_1290 (size=85)
2024-12-08T04:28:27,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742114_1290 (size=85)
2024-12-08T04:28:27,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742114_1290 (size=85)
2024-12-08T04:28:27,543 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:27,543 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing d352c6e5c0bfcc84b67531b0900577bc, disabling compactions & flushes
2024-12-08T04:28:27,543 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,543 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,543 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc. after waiting 0 ms
2024-12-08T04:28:27,543 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,543 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,544 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for d352c6e5c0bfcc84b67531b0900577bc:

2024-12-08T04:28:27,545 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742113_1289 (size=85)
2024-12-08T04:28:27,546 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742113_1289 (size=85)
2024-12-08T04:28:27,546 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742113_1289 (size=85)
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 14ff522260a4a9748a386abe8032a3c6, disabling compactions & flushes
2024-12-08T04:28:27,547 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6. after waiting 0 ms
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,547 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,547 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 14ff522260a4a9748a386abe8032a3c6:

2024-12-08T04:28:27,550 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:28:27,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733632107551"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632107551"}]},"ts":"1733632107551"}
2024-12-08T04:28:27,551 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733632107551"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632107551"}]},"ts":"1733632107551"}
2024-12-08T04:28:27,553 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:28:27,556 INFO  [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:28:27,556 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632107556"}]},"ts":"1733632107556"}
2024-12-08T04:28:27,559 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta
2024-12-08T04:28:27,565 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:28:27,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:28:27,570 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:28:27,570 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:28:27,570 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:28:27,570 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:28:27,570 INFO  [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:28:27,570 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:28:27,570 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, ASSIGN}]
2024-12-08T04:28:27,572 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, ASSIGN
2024-12-08T04:28:27,572 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, ASSIGN
2024-12-08T04:28:27,574 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:28:27,574 INFO  [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:28:27,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-08T04:28:27,724 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:28:27,724 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=14ff522260a4a9748a386abe8032a3c6, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:27,724 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=d352c6e5c0bfcc84b67531b0900577bc, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:27,726 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 14ff522260a4a9748a386abe8032a3c6, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:28:27,728 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure d352c6e5c0bfcc84b67531b0900577bc, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:27,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-08T04:28:27,879 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:27,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:27,882 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,882 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 14ff522260a4a9748a386abe8032a3c6, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.', STARTKEY => '2', ENDKEY => ''}
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6. service=AccessControlService
2024-12-08T04:28:27,883 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,883 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => d352c6e5c0bfcc84b67531b0900577bc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.', STARTKEY => '', ENDKEY => '2'}
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,883 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc. service=AccessControlService
2024-12-08T04:28:27,884 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:27,884 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,884 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:27,884 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,884 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,885 INFO  [StoreOpener-14ff522260a4a9748a386abe8032a3c6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 14ff522260a4a9748a386abe8032a3c6 
2024-12-08T04:28:27,885 INFO  [StoreOpener-d352c6e5c0bfcc84b67531b0900577bc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d352c6e5c0bfcc84b67531b0900577bc 
2024-12-08T04:28:27,886 INFO  [StoreOpener-14ff522260a4a9748a386abe8032a3c6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14ff522260a4a9748a386abe8032a3c6 columnFamilyName cf
2024-12-08T04:28:27,886 DEBUG [StoreOpener-14ff522260a4a9748a386abe8032a3c6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:27,887 INFO  [StoreOpener-d352c6e5c0bfcc84b67531b0900577bc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d352c6e5c0bfcc84b67531b0900577bc columnFamilyName cf
2024-12-08T04:28:27,887 DEBUG [StoreOpener-d352c6e5c0bfcc84b67531b0900577bc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:27,887 INFO  [StoreOpener-14ff522260a4a9748a386abe8032a3c6-1 {}] regionserver.HStore(327): Store=14ff522260a4a9748a386abe8032a3c6/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:27,887 INFO  [StoreOpener-d352c6e5c0bfcc84b67531b0900577bc-1 {}] regionserver.HStore(327): Store=d352c6e5c0bfcc84b67531b0900577bc/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:27,888 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,888 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,888 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,889 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,890 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:27,891 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:27,892 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:27,893 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:27,893 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 14ff522260a4a9748a386abe8032a3c6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62989367, jitterRate=-0.06138528883457184}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:27,893 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened d352c6e5c0bfcc84b67531b0900577bc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67063560, jitterRate=-6.750822067260742E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:27,894 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for d352c6e5c0bfcc84b67531b0900577bc:

2024-12-08T04:28:27,894 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 14ff522260a4a9748a386abe8032a3c6:

2024-12-08T04:28:27,894 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6., pid=113, masterSystemTime=1733632107879
2024-12-08T04:28:27,895 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc., pid=114, masterSystemTime=1733632107880
2024-12-08T04:28:27,896 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,896 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:27,897 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=14ff522260a4a9748a386abe8032a3c6, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:27,897 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,897 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:27,897 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=d352c6e5c0bfcc84b67531b0900577bc, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:27,900 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112
2024-12-08T04:28:27,900 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 14ff522260a4a9748a386abe8032a3c6, server=428ded7e54d6,41743,1733631984189 in 172 msec
2024-12-08T04:28:27,901 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, ASSIGN in 330 msec
2024-12-08T04:28:27,901 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111
2024-12-08T04:28:27,901 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure d352c6e5c0bfcc84b67531b0900577bc, server=428ded7e54d6,45955,1733631983994 in 172 msec
2024-12-08T04:28:27,903 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110
2024-12-08T04:28:27,903 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, ASSIGN in 331 msec
2024-12-08T04:28:27,903 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:28:27,904 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632107903"}]},"ts":"1733632107903"}
2024-12-08T04:28:27,905 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta
2024-12-08T04:28:27,907 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:28:27,907 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA
2024-12-08T04:28:27,909 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA]
2024-12-08T04:28:27,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:27,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:27,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:27,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,914 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:27,915 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 413 msec
2024-12-08T04:28:28,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-08T04:28:28,107 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed
2024-12-08T04:28:28,130 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6]
2024-12-08T04:28:28,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6], force=true
2024-12-08T04:28:28,136 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6], force=true
2024-12-08T04:28:28,136 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6], force=true
2024-12-08T04:28:28,136 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6], force=true
2024-12-08T04:28:28,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-08T04:28:28,147 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, UNASSIGN}]
2024-12-08T04:28:28,147 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, UNASSIGN
2024-12-08T04:28:28,147 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, UNASSIGN
2024-12-08T04:28:28,148 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=d352c6e5c0bfcc84b67531b0900577bc, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:28,149 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=14ff522260a4a9748a386abe8032a3c6, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:28,149 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:28,150 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure d352c6e5c0bfcc84b67531b0900577bc, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:28,150 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:28,151 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure 14ff522260a4a9748a386abe8032a3c6, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:28:28,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-08T04:28:28,302 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:28,302 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:28,302 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true
2024-12-08T04:28:28,302 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing d352c6e5c0bfcc84b67531b0900577bc, disabling compactions & flushes
2024-12-08T04:28:28,303 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:28,303 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:28,303 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc. after waiting 0 ms
2024-12-08T04:28:28,303 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:28,303 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing d352c6e5c0bfcc84b67531b0900577bc 1/1 column families, dataSize=24 B heapSize=352 B
2024-12-08T04:28:28,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:28,303 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:28,303 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true
2024-12-08T04:28:28,303 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 14ff522260a4a9748a386abe8032a3c6, disabling compactions & flushes
2024-12-08T04:28:28,303 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:28,304 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:28,304 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6. after waiting 0 ms
2024-12-08T04:28:28,304 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:28,304 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 14ff522260a4a9748a386abe8032a3c6 1/1 column families, dataSize=24 B heapSize=352 B
2024-12-08T04:28:28,320 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/.tmp/cf/cf68b4a05485433e945c4fa94a6769e9 is 28, key is 2/cf:/1733632108115/Put/seqid=0
2024-12-08T04:28:28,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742115_1291 (size=4945)
2024-12-08T04:28:28,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742115_1291 (size=4945)
2024-12-08T04:28:28,327 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742115_1291 (size=4945)
2024-12-08T04:28:28,327 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/.tmp/cf/cf68b4a05485433e945c4fa94a6769e9
2024-12-08T04:28:28,328 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/.tmp/cf/ac0893fcee0b4a53ad4c84d1cac1065c is 28, key is 1/cf:/1733632108110/Put/seqid=0
2024-12-08T04:28:28,333 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/.tmp/cf/cf68b4a05485433e945c4fa94a6769e9 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9
2024-12-08T04:28:28,338 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742116_1292 (size=4945)
2024-12-08T04:28:28,338 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742116_1292 (size=4945)
2024-12-08T04:28:28,338 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742116_1292 (size=4945)
2024-12-08T04:28:28,339 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/.tmp/cf/ac0893fcee0b4a53ad4c84d1cac1065c
2024-12-08T04:28:28,340 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9, entries=1, sequenceid=5, filesize=4.8 K
2024-12-08T04:28:28,341 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 14ff522260a4a9748a386abe8032a3c6 in 37ms, sequenceid=5, compaction requested=false
2024-12-08T04:28:28,341 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1'
2024-12-08T04:28:28,346 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:28:28,346 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/.tmp/cf/ac0893fcee0b4a53ad4c84d1cac1065c as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c
2024-12-08T04:28:28,347 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:28,347 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.
2024-12-08T04:28:28,347 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 14ff522260a4a9748a386abe8032a3c6:

2024-12-08T04:28:28,348 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:28,349 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=14ff522260a4a9748a386abe8032a3c6, regionState=CLOSED
2024-12-08T04:28:28,353 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117
2024-12-08T04:28:28,353 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure 14ff522260a4a9748a386abe8032a3c6, server=428ded7e54d6,41743,1733631984189 in 201 msec
2024-12-08T04:28:28,353 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c, entries=1, sequenceid=5, filesize=4.8 K
2024-12-08T04:28:28,354 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=14ff522260a4a9748a386abe8032a3c6, UNASSIGN in 207 msec
2024-12-08T04:28:28,354 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d352c6e5c0bfcc84b67531b0900577bc in 51ms, sequenceid=5, compaction requested=false
2024-12-08T04:28:28,358 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:28:28,358 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:28,359 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.
2024-12-08T04:28:28,359 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for d352c6e5c0bfcc84b67531b0900577bc:

2024-12-08T04:28:28,360 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:28,360 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=d352c6e5c0bfcc84b67531b0900577bc, regionState=CLOSED
2024-12-08T04:28:28,363 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116
2024-12-08T04:28:28,364 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure d352c6e5c0bfcc84b67531b0900577bc, server=428ded7e54d6,45955,1733631983994 in 213 msec
2024-12-08T04:28:28,365 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115
2024-12-08T04:28:28,365 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d352c6e5c0bfcc84b67531b0900577bc, UNASSIGN in 217 msec
2024-12-08T04:28:28,381 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742117_1293 (size=84)
2024-12-08T04:28:28,382 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742117_1293 (size=84)
2024-12-08T04:28:28,382 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742117_1293 (size=84)
2024-12-08T04:28:28,384 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:28,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742118_1294 (size=20)
2024-12-08T04:28:28,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742118_1294 (size=20)
2024-12-08T04:28:28,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742118_1294 (size=20)
2024-12-08T04:28:28,396 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:28,402 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742119_1295 (size=21)
2024-12-08T04:28:28,402 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742119_1295 (size=21)
2024-12-08T04:28:28,403 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742119_1295 (size=21)
2024-12-08T04:28:28,414 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742120_1296 (size=84)
2024-12-08T04:28:28,414 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742120_1296 (size=84)
2024-12-08T04:28:28,414 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742120_1296 (size=84)
2024-12-08T04:28:28,416 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:28,427 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1
2024-12-08T04:28:28,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107499.d352c6e5c0bfcc84b67531b0900577bc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:28,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733632107499.14ff522260a4a9748a386abe8032a3c6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:28,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:28,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-08T04:28:28,470 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, ASSIGN}]
2024-12-08T04:28:28,471 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, ASSIGN
2024-12-08T04:28:28,471 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, ASSIGN; state=MERGED, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:28:28,621 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-08T04:28:28,622 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=b371213db39acba44c12b50885d6398e, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:28,624 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure b371213db39acba44c12b50885d6398e, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:28,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-08T04:28:28,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:28,778 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:28,778 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => b371213db39acba44c12b50885d6398e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.', STARTKEY => '', ENDKEY => ''}
2024-12-08T04:28:28,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e. service=AccessControlService
2024-12-08T04:28:28,779 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:28,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:28,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,779 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,781 INFO  [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b371213db39acba44c12b50885d6398e 
2024-12-08T04:28:28,782 INFO  [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b371213db39acba44c12b50885d6398e columnFamilyName cf
2024-12-08T04:28:28,782 DEBUG [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:28,802 DEBUG [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/ac0893fcee0b4a53ad4c84d1cac1065c.d352c6e5c0bfcc84b67531b0900577bc->hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c-top
2024-12-08T04:28:28,807 DEBUG [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/cf68b4a05485433e945c4fa94a6769e9.14ff522260a4a9748a386abe8032a3c6->hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9-top
2024-12-08T04:28:28,808 INFO  [StoreOpener-b371213db39acba44c12b50885d6398e-1 {}] regionserver.HStore(327): Store=b371213db39acba44c12b50885d6398e/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:28,809 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,810 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for b371213db39acba44c12b50885d6398e
2024-12-08T04:28:28,813 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened b371213db39acba44c12b50885d6398e; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63897320, jitterRate=-0.04785573482513428}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:28,814 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for b371213db39acba44c12b50885d6398e:

2024-12-08T04:28:28,814 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e., pid=121, masterSystemTime=1733632108775
2024-12-08T04:28:28,815 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.,because compaction is disabled.
2024-12-08T04:28:28,816 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:28,816 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:28,817 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=b371213db39acba44c12b50885d6398e, regionState=OPEN, openSeqNum=9, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:28,820 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-12-08T04:28:28,820 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure b371213db39acba44c12b50885d6398e, server=428ded7e54d6,45955,1733631983994 in 195 msec
2024-12-08T04:28:28,821 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115
2024-12-08T04:28:28,821 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, ASSIGN in 350 msec
2024-12-08T04:28:28,823 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[d352c6e5c0bfcc84b67531b0900577bc, 14ff522260a4a9748a386abe8032a3c6], force=true in 689 msec
2024-12-08T04:28:29,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-08T04:28:29,242 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed
2024-12-08T04:28:29,243 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }
2024-12-08T04:28:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632109243 (current time:1733632109243).
2024-12-08T04:28:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:28:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2
2024-12-08T04:28:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:29,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b49a6b9 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a515c91
2024-12-08T04:28:29,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d3ce4d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:29,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:29,254 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b49a6b9 to 127.0.0.1:55878
2024-12-08T04:28:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:29,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ec501f0 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e006d6d
2024-12-08T04:28:29,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64e03834, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:29,266 DEBUG [hconnection-0x642e7696-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:29,267 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ec501f0 to 127.0.0.1:55878
2024-12-08T04:28:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA]
2024-12-08T04:28:29,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:29,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }
2024-12-08T04:28:29,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122
2024-12-08T04:28:29,273 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:29,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-08T04:28:29,274 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:29,276 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:29,284 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742121_1297 (size=216)
2024-12-08T04:28:29,284 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742121_1297 (size=216)
2024-12-08T04:28:29,284 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742121_1297 (size=216)
2024-12-08T04:28:29,288 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:29,288 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b371213db39acba44c12b50885d6398e}]
2024-12-08T04:28:29,289 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b371213db39acba44c12b50885d6398e
2024-12-08T04:28:29,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-08T04:28:29,384 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region cbc84221475ff3a867edc4c51f6c6129 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:28:29,385 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 33c5526d7c6d067212022d154e4122df changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:28:29,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:29,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123
2024-12-08T04:28:29,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:29,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for b371213db39acba44c12b50885d6398e:

2024-12-08T04:28:29,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed.
2024-12-08T04:28:29,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:29,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/ac0893fcee0b4a53ad4c84d1cac1065c.d352c6e5c0bfcc84b67531b0900577bc->hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c-top, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/cf68b4a05485433e945c4fa94a6769e9.14ff522260a4a9748a386abe8032a3c6->hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9-top] hfiles
2024-12-08T04:28:29,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/ac0893fcee0b4a53ad4c84d1cac1065c.d352c6e5c0bfcc84b67531b0900577bc for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/cf68b4a05485433e945c4fa94a6769e9.14ff522260a4a9748a386abe8032a3c6 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,449 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742122_1298 (size=269)
2024-12-08T04:28:29,450 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742122_1298 (size=269)
2024-12-08T04:28:29,450 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742122_1298 (size=269)
2024-12-08T04:28:29,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:29,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123
2024-12-08T04:28:29,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=123
2024-12-08T04:28:29,451 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region b371213db39acba44c12b50885d6398e
2024-12-08T04:28:29,451 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b371213db39acba44c12b50885d6398e
2024-12-08T04:28:29,454 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122
2024-12-08T04:28:29,454 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure b371213db39acba44c12b50885d6398e in 164 msec
2024-12-08T04:28:29,454 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:29,455 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:29,456 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:29,456 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,456 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,470 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742123_1299 (size=670)
2024-12-08T04:28:29,470 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742123_1299 (size=670)
2024-12-08T04:28:29,471 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742123_1299 (size=670)
2024-12-08T04:28:29,473 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:29,479 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:29,479 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,480 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:29,480 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122
2024-12-08T04:28:29,481 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 209 msec
2024-12-08T04:28:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-08T04:28:29,575 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed
2024-12-08T04:28:29,575 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575
2024-12-08T04:28:29,575 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:29,610 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:29,610 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,612 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:28:29,618 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:29,633 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742125_1301 (size=670)
2024-12-08T04:28:29,633 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742124_1300 (size=216)
2024-12-08T04:28:29,633 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742124_1300 (size=216)
2024-12-08T04:28:29,634 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742125_1301 (size=670)
2024-12-08T04:28:29,634 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742125_1301 (size=670)
2024-12-08T04:28:29,634 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742124_1300 (size=216)
2024-12-08T04:28:29,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:29,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:29,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:29,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:29,815 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0005_000001 (auth:SIMPLE) from 127.0.0.1:38872
2024-12-08T04:28:29,848 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000001/launch_container.sh]
2024-12-08T04:28:29,848 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000001/container_tokens]
2024-12-08T04:28:29,848 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0005/container_1733631992429_0005_01_000001/sysfs]
2024-12-08T04:28:30,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-9414640476513097250.jar
2024-12-08T04:28:30,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-13088054557732227308.jar
2024-12-08T04:28:30,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:28:30,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:28:30,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:28:30,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:28:30,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:28:30,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:28:30,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:28:30,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:28:30,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:28:30,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:28:30,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:28:30,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:28:30,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:28:30,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:30,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:30,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:30,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:30,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:28:30,870 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:30,870 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:28:30,928 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742126_1302 (size=127628)
2024-12-08T04:28:30,928 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742126_1302 (size=127628)
2024-12-08T04:28:30,928 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742126_1302 (size=127628)
2024-12-08T04:28:30,949 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742127_1303 (size=2172101)
2024-12-08T04:28:30,949 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742127_1303 (size=2172101)
2024-12-08T04:28:30,950 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742127_1303 (size=2172101)
2024-12-08T04:28:30,964 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742128_1304 (size=213228)
2024-12-08T04:28:30,964 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742128_1304 (size=213228)
2024-12-08T04:28:30,965 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742128_1304 (size=213228)
2024-12-08T04:28:30,979 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742129_1305 (size=1877034)
2024-12-08T04:28:30,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742129_1305 (size=1877034)
2024-12-08T04:28:30,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742129_1305 (size=1877034)
2024-12-08T04:28:30,990 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742130_1306 (size=533455)
2024-12-08T04:28:30,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742130_1306 (size=533455)
2024-12-08T04:28:30,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742130_1306 (size=533455)
2024-12-08T04:28:31,022 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:28:31,037 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742131_1307 (size=7280644)
2024-12-08T04:28:31,043 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742131_1307 (size=7280644)
2024-12-08T04:28:31,044 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742131_1307 (size=7280644)
2024-12-08T04:28:31,101 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742132_1308 (size=4188619)
2024-12-08T04:28:31,101 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742132_1308 (size=4188619)
2024-12-08T04:28:31,102 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742132_1308 (size=4188619)
2024-12-08T04:28:31,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742133_1309 (size=20406)
2024-12-08T04:28:31,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742133_1309 (size=20406)
2024-12-08T04:28:31,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742133_1309 (size=20406)
2024-12-08T04:28:31,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742134_1310 (size=75495)
2024-12-08T04:28:31,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742134_1310 (size=75495)
2024-12-08T04:28:31,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742134_1310 (size=75495)
2024-12-08T04:28:31,534 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742135_1311 (size=45609)
2024-12-08T04:28:31,534 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742135_1311 (size=45609)
2024-12-08T04:28:31,535 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742135_1311 (size=45609)
2024-12-08T04:28:31,541 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742136_1312 (size=110084)
2024-12-08T04:28:31,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742136_1312 (size=110084)
2024-12-08T04:28:31,542 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742136_1312 (size=110084)
2024-12-08T04:28:31,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742137_1313 (size=1323991)
2024-12-08T04:28:31,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742137_1313 (size=1323991)
2024-12-08T04:28:31,554 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742137_1313 (size=1323991)
2024-12-08T04:28:31,561 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742138_1314 (size=23076)
2024-12-08T04:28:31,561 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742138_1314 (size=23076)
2024-12-08T04:28:31,561 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742138_1314 (size=23076)
2024-12-08T04:28:31,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742139_1315 (size=126803)
2024-12-08T04:28:31,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742139_1315 (size=126803)
2024-12-08T04:28:31,569 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742139_1315 (size=126803)
2024-12-08T04:28:31,577 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742140_1316 (size=322274)
2024-12-08T04:28:31,577 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742140_1316 (size=322274)
2024-12-08T04:28:31,577 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742140_1316 (size=322274)
2024-12-08T04:28:31,589 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742141_1317 (size=1832290)
2024-12-08T04:28:31,589 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742141_1317 (size=1832290)
2024-12-08T04:28:31,589 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742141_1317 (size=1832290)
2024-12-08T04:28:31,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742142_1318 (size=30081)
2024-12-08T04:28:31,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742142_1318 (size=30081)
2024-12-08T04:28:31,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742142_1318 (size=30081)
2024-12-08T04:28:31,603 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742143_1319 (size=53616)
2024-12-08T04:28:31,604 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742143_1319 (size=53616)
2024-12-08T04:28:31,604 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742143_1319 (size=53616)
2024-12-08T04:28:31,610 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742144_1320 (size=29229)
2024-12-08T04:28:31,611 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742144_1320 (size=29229)
2024-12-08T04:28:31,611 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742144_1320 (size=29229)
2024-12-08T04:28:31,638 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742145_1321 (size=6350155)
2024-12-08T04:28:31,639 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742145_1321 (size=6350155)
2024-12-08T04:28:31,639 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742145_1321 (size=6350155)
2024-12-08T04:28:31,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742146_1322 (size=169089)
2024-12-08T04:28:31,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742146_1322 (size=169089)
2024-12-08T04:28:31,647 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742146_1322 (size=169089)
2024-12-08T04:28:31,659 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742147_1323 (size=451756)
2024-12-08T04:28:31,659 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742147_1323 (size=451756)
2024-12-08T04:28:31,660 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742147_1323 (size=451756)
2024-12-08T04:28:31,687 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742148_1324 (size=5175431)
2024-12-08T04:28:31,687 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742148_1324 (size=5175431)
2024-12-08T04:28:31,688 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742148_1324 (size=5175431)
2024-12-08T04:28:31,695 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742149_1325 (size=136454)
2024-12-08T04:28:31,696 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742149_1325 (size=136454)
2024-12-08T04:28:31,696 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742149_1325 (size=136454)
2024-12-08T04:28:31,706 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742150_1326 (size=907852)
2024-12-08T04:28:31,706 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742150_1326 (size=907852)
2024-12-08T04:28:31,707 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742150_1326 (size=907852)
2024-12-08T04:28:31,722 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742151_1327 (size=3317408)
2024-12-08T04:28:31,723 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742151_1327 (size=3317408)
2024-12-08T04:28:31,723 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742151_1327 (size=3317408)
2024-12-08T04:28:31,733 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742152_1328 (size=503880)
2024-12-08T04:28:31,733 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742152_1328 (size=503880)
2024-12-08T04:28:31,734 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742152_1328 (size=503880)
2024-12-08T04:28:31,754 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742153_1329 (size=4695811)
2024-12-08T04:28:31,754 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742153_1329 (size=4695811)
2024-12-08T04:28:31,755 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742153_1329 (size=4695811)
2024-12-08T04:28:31,756 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:28:31,758 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list
2024-12-08T04:28:31,760 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K
2024-12-08T04:28:31,767 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742154_1330 (size=378)
2024-12-08T04:28:31,767 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742154_1330 (size=378)
2024-12-08T04:28:31,767 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742154_1330 (size=378)
2024-12-08T04:28:31,773 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742155_1331 (size=15)
2024-12-08T04:28:31,773 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742155_1331 (size=15)
2024-12-08T04:28:31,773 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742155_1331 (size=15)
2024-12-08T04:28:31,785 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742156_1332 (size=304942)
2024-12-08T04:28:31,786 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742156_1332 (size=304942)
2024-12-08T04:28:31,786 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742156_1332 (size=304942)
2024-12-08T04:28:31,805 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:28:31,805 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:28:31,815 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0006_000001 (auth:SIMPLE) from 127.0.0.1:38886
2024-12-08T04:28:33,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:33,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer
2024-12-08T04:28:33,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:33,633 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer
2024-12-08T04:28:33,634 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports
2024-12-08T04:28:38,125 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0006_000001 (auth:SIMPLE) from 127.0.0.1:33200
2024-12-08T04:28:38,390 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742157_1333 (size=350616)
2024-12-08T04:28:38,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742157_1333 (size=350616)
2024-12-08T04:28:38,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742157_1333 (size=350616)
2024-12-08T04:28:39,136 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:28:40,457 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0006_000001 (auth:SIMPLE) from 127.0.0.1:40312
2024-12-08T04:28:45,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742158_1334 (size=4945)
2024-12-08T04:28:45,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742158_1334 (size=4945)
2024-12-08T04:28:45,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742158_1334 (size=4945)
2024-12-08T04:28:45,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742159_1335 (size=4945)
2024-12-08T04:28:45,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742159_1335 (size=4945)
2024-12-08T04:28:45,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742159_1335 (size=4945)
2024-12-08T04:28:45,605 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742160_1336 (size=17474)
2024-12-08T04:28:45,606 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742160_1336 (size=17474)
2024-12-08T04:28:45,606 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742160_1336 (size=17474)
2024-12-08T04:28:45,618 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742161_1337 (size=482)
2024-12-08T04:28:45,618 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742161_1337 (size=482)
2024-12-08T04:28:45,618 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742161_1337 (size=482)
2024-12-08T04:28:45,652 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742162_1338 (size=17474)
2024-12-08T04:28:45,652 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742162_1338 (size=17474)
2024-12-08T04:28:45,653 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742162_1338 (size=17474)
2024-12-08T04:28:45,673 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000002/launch_container.sh]
2024-12-08T04:28:45,673 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000002/container_tokens]
2024-12-08T04:28:45,674 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000002/sysfs]
2024-12-08T04:28:45,678 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742163_1339 (size=350616)
2024-12-08T04:28:45,679 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742163_1339 (size=350616)
2024-12-08T04:28:45,679 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742163_1339 (size=350616)
2024-12-08T04:28:45,692 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0006_000001 (auth:SIMPLE) from 127.0.0.1:39066
2024-12-08T04:28:47,224 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:28:47,226 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:28:47,234 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,234 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:28:47,235 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:28:47,235 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,236 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo
2024-12-08T04:28:47,236 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest
2024-12-08T04:28:47,236 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,237 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo
2024-12-08T04:28:47,237 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632109575/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest
2024-12-08T04:28:47,249 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,250 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124
2024-12-08T04:28:47,254 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632127254"}]},"ts":"1733632127254"}
2024-12-08T04:28:47,256 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta
2024-12-08T04:28:47,259 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING
2024-12-08T04:28:47,260 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}]
2024-12-08T04:28:47,261 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, UNASSIGN}]
2024-12-08T04:28:47,263 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, UNASSIGN
2024-12-08T04:28:47,264 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=b371213db39acba44c12b50885d6398e, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:47,265 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:47,265 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure b371213db39acba44c12b50885d6398e, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:47,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124
2024-12-08T04:28:47,417 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:47,418 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close b371213db39acba44c12b50885d6398e
2024-12-08T04:28:47,418 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:28:47,418 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing b371213db39acba44c12b50885d6398e, disabling compactions & flushes
2024-12-08T04:28:47,418 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:47,418 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:47,418 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e. after waiting 0 ms
2024-12-08T04:28:47,418 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:47,452 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8
2024-12-08T04:28:47,453 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:47,454 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.
2024-12-08T04:28:47,454 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for b371213db39acba44c12b50885d6398e:
2024-12-08T04:28:47,456 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=b371213db39acba44c12b50885d6398e, regionState=CLOSED
2024-12-08T04:28:47,457 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed b371213db39acba44c12b50885d6398e
2024-12-08T04:28:47,460 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126
2024-12-08T04:28:47,461 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure b371213db39acba44c12b50885d6398e, server=428ded7e54d6,45955,1733631983994 in 193 msec
2024-12-08T04:28:47,462 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125
2024-12-08T04:28:47,462 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b371213db39acba44c12b50885d6398e, UNASSIGN in 199 msec
2024-12-08T04:28:47,464 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124
2024-12-08T04:28:47,464 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 203 msec
2024-12-08T04:28:47,466 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632127465"}]},"ts":"1733632127465"}
2024-12-08T04:28:47,467 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta
2024-12-08T04:28:47,469 INFO  [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED
2024-12-08T04:28:47,471 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 220 msec
2024-12-08T04:28:47,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124
2024-12-08T04:28:47,557 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed
2024-12-08T04:28:47,558 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,560 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,562 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,563 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,567 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e
2024-12-08T04:28:47,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF
2024-12-08T04:28:47,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF
2024-12-08T04:28:47,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF
2024-12-08T04:28:47,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF
2024-12-08T04:28:47,570 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:47,570 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/recovered.edits]
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,572 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:47,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:47,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-12-08T04:28:47,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:47,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:47,579 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/ac0893fcee0b4a53ad4c84d1cac1065c.d352c6e5c0bfcc84b67531b0900577bc to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/ac0893fcee0b4a53ad4c84d1cac1065c.d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:47,580 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/recovered.edits]
2024-12-08T04:28:47,582 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/cf68b4a05485433e945c4fa94a6769e9.14ff522260a4a9748a386abe8032a3c6 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/cf/cf68b4a05485433e945c4fa94a6769e9.14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:47,584 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:47,586 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/recovered.edits]
2024-12-08T04:28:47,592 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/cf/ac0893fcee0b4a53ad4c84d1cac1065c
2024-12-08T04:28:47,594 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/recovered.edits/12.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e/recovered.edits/12.seqid
2024-12-08T04:28:47,595 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b371213db39acba44c12b50885d6398e
2024-12-08T04:28:47,597 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/cf/cf68b4a05485433e945c4fa94a6769e9
2024-12-08T04:28:47,600 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/recovered.edits/8.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc/recovered.edits/8.seqid
2024-12-08T04:28:47,601 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d352c6e5c0bfcc84b67531b0900577bc
2024-12-08T04:28:47,602 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/recovered.edits/8.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6/recovered.edits/8.seqid
2024-12-08T04:28:47,602 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/14ff522260a4a9748a386abe8032a3c6
2024-12-08T04:28:47,603 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions
2024-12-08T04:28:47,605 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,610 WARN  [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta
2024-12-08T04:28:47,617 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor.
2024-12-08T04:28:47,619 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,619 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states.
2024-12-08T04:28:47,620 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632127619"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:47,622 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META
2024-12-08T04:28:47,622 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b371213db39acba44c12b50885d6398e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e.', STARTKEY => '', ENDKEY => ''}]
2024-12-08T04:28:47,622 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted.
2024-12-08T04:28:47,622 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632127622"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:47,626 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META
2024-12-08T04:28:47,628 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:47,630 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 71 msec
2024-12-08T04:28:47,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-12-08T04:28:47,674 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed
2024-12-08T04:28:47,675 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,675 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129
2024-12-08T04:28:47,679 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632127679"}]},"ts":"1733632127679"}
2024-12-08T04:28:47,681 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta
2024-12-08T04:28:47,690 INFO  [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING
2024-12-08T04:28:47,691 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}]
2024-12-08T04:28:47,693 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, UNASSIGN}]
2024-12-08T04:28:47,699 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, UNASSIGN
2024-12-08T04:28:47,699 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, UNASSIGN
2024-12-08T04:28:47,700 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=33c5526d7c6d067212022d154e4122df, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:47,700 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=cbc84221475ff3a867edc4c51f6c6129, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:28:47,702 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:47,702 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 33c5526d7c6d067212022d154e4122df, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:28:47,703 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:28:47,703 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure cbc84221475ff3a867edc4c51f6c6129, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:28:47,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129
2024-12-08T04:28:47,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:47,855 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:47,855 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:28:47,855 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 33c5526d7c6d067212022d154e4122df, disabling compactions & flushes
2024-12-08T04:28:47,855 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:47,856 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:47,856 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df. after waiting 0 ms
2024-12-08T04:28:47,856 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:47,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:28:47,857 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:47,857 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:28:47,857 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing cbc84221475ff3a867edc4c51f6c6129, disabling compactions & flushes
2024-12-08T04:28:47,857 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:47,857 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:47,857 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129. after waiting 0 ms
2024-12-08T04:28:47,857 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:47,868 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:28:47,868 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:28:47,869 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:47,869 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.
2024-12-08T04:28:47,869 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 33c5526d7c6d067212022d154e4122df:

2024-12-08T04:28:47,871 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:47,871 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=33c5526d7c6d067212022d154e4122df, regionState=CLOSED
2024-12-08T04:28:47,875 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132
2024-12-08T04:28:47,875 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 33c5526d7c6d067212022d154e4122df, server=428ded7e54d6,46421,1733631984115 in 171 msec
2024-12-08T04:28:47,876 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=33c5526d7c6d067212022d154e4122df, UNASSIGN in 182 msec
2024-12-08T04:28:47,879 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:28:47,879 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.
2024-12-08T04:28:47,879 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for cbc84221475ff3a867edc4c51f6c6129:

2024-12-08T04:28:47,881 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:47,881 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=cbc84221475ff3a867edc4c51f6c6129, regionState=CLOSED
2024-12-08T04:28:47,884 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131
2024-12-08T04:28:47,884 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure cbc84221475ff3a867edc4c51f6c6129, server=428ded7e54d6,41743,1733631984189 in 179 msec
2024-12-08T04:28:47,885 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130
2024-12-08T04:28:47,885 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cbc84221475ff3a867edc4c51f6c6129, UNASSIGN in 191 msec
2024-12-08T04:28:47,887 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129
2024-12-08T04:28:47,887 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 195 msec
2024-12-08T04:28:47,889 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632127888"}]},"ts":"1733632127888"}
2024-12-08T04:28:47,890 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta
2024-12-08T04:28:47,892 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED
2024-12-08T04:28:47,894 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 218 msec
2024-12-08T04:28:47,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129
2024-12-08T04:28:47,981 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed
2024-12-08T04:28:47,982 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,984 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,985 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,986 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,988 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:47,988 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,991 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF
2024-12-08T04:28:47,991 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF
2024-12-08T04:28:47,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,992 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null
2024-12-08T04:28:47,992 INFO  [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-08T04:28:47,992 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null
2024-12-08T04:28:47,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,993 INFO  [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-08T04:28:47,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:47,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:47,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135
2024-12-08T04:28:47,998 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/recovered.edits]
2024-12-08T04:28:47,998 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/recovered.edits]
2024-12-08T04:28:48,003 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/cf/4c39cb2cab424ca98e799f628acaa1f3
2024-12-08T04:28:48,003 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/cf/ae52ef979ba74ae093e4ab1ac55b990d
2024-12-08T04:28:48,007 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129/recovered.edits/9.seqid
2024-12-08T04:28:48,008 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/cbc84221475ff3a867edc4c51f6c6129
2024-12-08T04:28:48,017 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df/recovered.edits/9.seqid
2024-12-08T04:28:48,018 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithMergeRegion/33c5526d7c6d067212022d154e4122df
2024-12-08T04:28:48,018 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions
2024-12-08T04:28:48,021 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:48,024 WARN  [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta
2024-12-08T04:28:48,026 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor.
2024-12-08T04:28:48,028 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:48,028 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states.
2024-12-08T04:28:48,028 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632128028"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:48,028 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632128028"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:48,030 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:28:48,030 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cbc84221475ff3a867edc4c51f6c6129, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733632105803.cbc84221475ff3a867edc4c51f6c6129.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 33c5526d7c6d067212022d154e4122df, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733632105803.33c5526d7c6d067212022d154e4122df.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:28:48,030 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted.
2024-12-08T04:28:48,030 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632128030"}]},"ts":"9223372036854775807"}
2024-12-08T04:28:48,032 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META
2024-12-08T04:28:48,034 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:48,035 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 52 msec
2024-12-08T04:28:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135
2024-12-08T04:28:48,097 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed
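[editor's note] The entries above trace the client-driven teardown of testtb-testExportFileSystemStateWithMergeRegion: a DisableTableProcedure (pid=129) followed by a DeleteTableProcedure (pid=135), each polled via "Checking to see if procedure is done" until the HBaseAdmin future reports completion. The test source itself is not part of this log, but as a hedged sketch, the standard HBase 2.x Admin calls that produce exactly this sequence look roughly like the following (connection boilerplate is an assumption; only the Admin methods are standard API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
            if (admin.isTableEnabled(tn)) {
                admin.disableTable(tn); // blocks until the DisableTableProcedure (pid=129 above) finishes
            }
            admin.deleteTable(tn);      // blocks until the DeleteTableProcedure (pid=135 above) finishes
        }
    }
}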
2024-12-08T04:28:48,106 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion"

2024-12-08T04:28:48,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:48,110 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion"

2024-12-08T04:28:48,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:48,113 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1"

2024-12-08T04:28:48,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1
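[editor's note] The three "delete name: ..." requests above are the client removing the snapshots it created during the test. A hedged sketch of the corresponding calls, reusing the imports from the previous sketch (the snapshot names come from the log; everything else is an assumption):

// Deleting the three snapshots named in the log via the HBase 2.x Admin API.
try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = conn.getAdmin()) {
    admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
    admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
    admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
}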
2024-12-08T04:28:48,138 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=802 (was 790)
Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1681678781_1 at /127.0.0.1:40922 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #9
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-15
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #10
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:57668 [Waiting for operation #7]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-32
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-33
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-31
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-14
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-11
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-30
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:59766 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1681678781_1 at /127.0.0.1:59754 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:40851 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: HFileArchiver-13
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-29
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-4732
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: HFileArchiver-12
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/428ded7e54d6:0-0
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-28
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
	app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40851
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 31470)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:40936 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=813 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=628 (was 614) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 20), AvailableMemoryMB=3110 (was 3314)
2024-12-08T04:28:48,139 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500
2024-12-08T04:28:48,160 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=802, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=628, ProcessCount=20, AvailableMemoryMB=3110
2024-12-08T04:28:48,160 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500
2024-12-08T04:28:48,162 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:28:48,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:28:48,163 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:28:48,164 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:48,164 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportExpiredSnapshot"
 procId is: 136
2024-12-08T04:28:48,164 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:28:48,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-08T04:28:48,177 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742164_1340 (size=407)
2024-12-08T04:28:48,177 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742164_1340 (size=407)
2024-12-08T04:28:48,178 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742164_1340 (size=407)
2024-12-08T04:28:48,180 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d014bceee65678ef1e8d4fae8969b87e, NAME => 'testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:48,183 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => f790403a55ef7cc492738ed15e40d4cd, NAME => 'testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:48,192 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742165_1341 (size=68)
2024-12-08T04:28:48,193 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742165_1341 (size=68)
2024-12-08T04:28:48,193 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742165_1341 (size=68)
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing d014bceee65678ef1e8d4fae8969b87e, disabling compactions & flushes
2024-12-08T04:28:48,194 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. after waiting 0 ms
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,194 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,194 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for d014bceee65678ef1e8d4fae8969b87e:

2024-12-08T04:28:48,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742166_1342 (size=68)
2024-12-08T04:28:48,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742166_1342 (size=68)
2024-12-08T04:28:48,205 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742166_1342 (size=68)
2024-12-08T04:28:48,206 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:48,206 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing f790403a55ef7cc492738ed15e40d4cd, disabling compactions & flushes
2024-12-08T04:28:48,206 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,207 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,207 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. after waiting 0 ms
2024-12-08T04:28:48,207 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,207 INFO  [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,207 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for f790403a55ef7cc492738ed15e40d4cd:

2024-12-08T04:28:48,210 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:28:48,210 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733632128210"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632128210"}]},"ts":"1733632128210"}
2024-12-08T04:28:48,210 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733632128210"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632128210"}]},"ts":"1733632128210"}
2024-12-08T04:28:48,212 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:28:48,213 INFO  [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:28:48,213 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632128213"}]},"ts":"1733632128213"}
2024-12-08T04:28:48,215 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta
2024-12-08T04:28:48,218 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:28:48,220 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:28:48,220 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:28:48,220 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:28:48,220 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:28:48,220 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:28:48,220 INFO  [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:28:48,220 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:28:48,220 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, ASSIGN}]
2024-12-08T04:28:48,221 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, ASSIGN
2024-12-08T04:28:48,221 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, ASSIGN
2024-12-08T04:28:48,222 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:28:48,222 INFO  [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:28:48,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-08T04:28:48,372 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:28:48,373 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=d014bceee65678ef1e8d4fae8969b87e, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:48,373 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=f790403a55ef7cc492738ed15e40d4cd, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:48,375 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure d014bceee65678ef1e8d4fae8969b87e, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:28:48,376 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure f790403a55ef7cc492738ed15e40d4cd, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:48,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-08T04:28:48,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:48,530 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:48,530 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,531 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => d014bceee65678ef1e8d4fae8969b87e, NAME => 'testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:28:48,531 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. service=AccessControlService
2024-12-08T04:28:48,531 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:48,531 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,532 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:48,532 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,532 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,532 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,532 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => f790403a55ef7cc492738ed15e40d4cd, NAME => 'testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:28:48,533 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. service=AccessControlService
2024-12-08T04:28:48,533 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:48,533 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,533 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:48,533 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,533 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,533 INFO  [StoreOpener-d014bceee65678ef1e8d4fae8969b87e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d014bceee65678ef1e8d4fae8969b87e 
2024-12-08T04:28:48,534 INFO  [StoreOpener-f790403a55ef7cc492738ed15e40d4cd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f790403a55ef7cc492738ed15e40d4cd 
2024-12-08T04:28:48,535 INFO  [StoreOpener-d014bceee65678ef1e8d4fae8969b87e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d014bceee65678ef1e8d4fae8969b87e columnFamilyName cf
2024-12-08T04:28:48,535 DEBUG [StoreOpener-d014bceee65678ef1e8d4fae8969b87e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:48,535 INFO  [StoreOpener-d014bceee65678ef1e8d4fae8969b87e-1 {}] regionserver.HStore(327): Store=d014bceee65678ef1e8d4fae8969b87e/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:48,536 INFO  [StoreOpener-f790403a55ef7cc492738ed15e40d4cd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f790403a55ef7cc492738ed15e40d4cd columnFamilyName cf
2024-12-08T04:28:48,536 DEBUG [StoreOpener-f790403a55ef7cc492738ed15e40d4cd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:48,536 INFO  [StoreOpener-f790403a55ef7cc492738ed15e40d4cd-1 {}] regionserver.HStore(327): Store=f790403a55ef7cc492738ed15e40d4cd/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:48,536 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,537 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,537 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,537 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,539 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,539 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,541 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:48,541 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:48,542 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened d014bceee65678ef1e8d4fae8969b87e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69592412, jitterRate=0.03700774908065796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:48,542 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened f790403a55ef7cc492738ed15e40d4cd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68152604, jitterRate=0.015552937984466553}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:48,543 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for d014bceee65678ef1e8d4fae8969b87e:

2024-12-08T04:28:48,543 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for f790403a55ef7cc492738ed15e40d4cd:

2024-12-08T04:28:48,544 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd., pid=140, masterSystemTime=1733632128529
2024-12-08T04:28:48,544 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e., pid=139, masterSystemTime=1733632128527
2024-12-08T04:28:48,545 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,545 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,546 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=f790403a55ef7cc492738ed15e40d4cd, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:48,546 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,546 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,547 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=d014bceee65678ef1e8d4fae8969b87e, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:48,549 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138
2024-12-08T04:28:48,550 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure f790403a55ef7cc492738ed15e40d4cd, server=428ded7e54d6,45955,1733631983994 in 171 msec
2024-12-08T04:28:48,550 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137
2024-12-08T04:28:48,550 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, ASSIGN in 329 msec
2024-12-08T04:28:48,550 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure d014bceee65678ef1e8d4fae8969b87e, server=428ded7e54d6,46421,1733631984115 in 173 msec
2024-12-08T04:28:48,551 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136
2024-12-08T04:28:48,551 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, ASSIGN in 330 msec
2024-12-08T04:28:48,552 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:28:48,552 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632128552"}]},"ts":"1733632128552"}
2024-12-08T04:28:48,554 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta
2024-12-08T04:28:48,556 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:28:48,556 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA
2024-12-08T04:28:48,558 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-12-08T04:28:48,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:48,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:48,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:48,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:48,561 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:48,562 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:48,562 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:48,562 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:48,564 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 399 msec
2024-12-08T04:28:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-08T04:28:48,768 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed
2024-12-08T04:28:48,768 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms
2024-12-08T04:28:48,768 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:48,772 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states.
2024-12-08T04:28:48,772 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:48,772 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned.
2024-12-08T04:28:48,775 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }
2024-12-08T04:28:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632128775 (current time:1733632128775).
2024-12-08T04:28:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:28:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2
2024-12-08T04:28:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02b4a076 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@553e7d0f
2024-12-08T04:28:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58443b3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:48,790 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02b4a076 to 127.0.0.1:55878
2024-12-08T04:28:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x522c9a52 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74dc531d
2024-12-08T04:28:48,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@db80368, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:48,804 DEBUG [hconnection-0x190c9967-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:48,805 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:48,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x522c9a52 to 127.0.0.1:55878
2024-12-08T04:28:48,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:48,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-12-08T04:28:48,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:48,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }
2024-12-08T04:28:48,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141
2024-12-08T04:28:48,811 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:48,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141
2024-12-08T04:28:48,812 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:48,814 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:48,822 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742167_1343 (size=170)
2024-12-08T04:28:48,822 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742167_1343 (size=170)
2024-12-08T04:28:48,823 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742167_1343 (size=170)
2024-12-08T04:28:48,825 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:48,825 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e}, {pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd}]
2024-12-08T04:28:48,826 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,827 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:48,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141
2024-12-08T04:28:48,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:48,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:48,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142
2024-12-08T04:28:48,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143
2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for f790403a55ef7cc492738ed15e40d4cd:

2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for d014bceee65678ef1e8d4fae8969b87e:

2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. for emptySnaptb0-testExportExpiredSnapshot completed.
2024-12-08T04:28:48,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. for emptySnaptb0-testExportExpiredSnapshot completed.
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:28:48,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:28:48,994 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742168_1344 (size=71)
2024-12-08T04:28:48,995 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742168_1344 (size=71)
2024-12-08T04:28:48,995 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742168_1344 (size=71)
2024-12-08T04:28:48,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:48,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142
2024-12-08T04:28:48,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=142
2024-12-08T04:28:48,996 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,997 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:48,998 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e in 172 msec
2024-12-08T04:28:49,002 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742169_1345 (size=71)
2024-12-08T04:28:49,002 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742169_1345 (size=71)
2024-12-08T04:28:49,002 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742169_1345 (size=71)
2024-12-08T04:28:49,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:49,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143
2024-12-08T04:28:49,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=143
2024-12-08T04:28:49,003 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:49,003 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:49,005 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=141
2024-12-08T04:28:49,005 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd in 179 msec
2024-12-08T04:28:49,005 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:49,006 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:49,006 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:49,006 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,007 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,025 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742170_1346 (size=552)
2024-12-08T04:28:49,026 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742170_1346 (size=552)
2024-12-08T04:28:49,026 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742170_1346 (size=552)
2024-12-08T04:28:49,028 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:49,033 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:49,034 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,043 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:49,043 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141
2024-12-08T04:28:49,045 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 235 msec
2024-12-08T04:28:49,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141
2024-12-08T04:28:49,113 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed
2024-12-08T04:28:49,120 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:28:49,121 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:28:49,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot
2024-12-08T04:28:49,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:49,125 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:49,137 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }
2024-12-08T04:28:49,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632129137 (current time:1733632129137).
2024-12-08T04:28:49,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:28:49,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2
2024-12-08T04:28:49,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:49,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74787359 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9c21b10
2024-12-08T04:28:49,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57ab0a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:49,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:49,144 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74787359 to 127.0.0.1:55878
2024-12-08T04:28:49,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:49,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f16d8b9 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26155ed0
2024-12-08T04:28:49,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba554a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:49,151 DEBUG [hconnection-0x62ef92c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:49,152 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f16d8b9 to 127.0.0.1:55878
2024-12-08T04:28:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-12-08T04:28:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }
2024-12-08T04:28:49,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144
2024-12-08T04:28:49,157 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:49,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-12-08T04:28:49,158 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:49,160 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:49,167 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742171_1347 (size=165)
2024-12-08T04:28:49,168 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742171_1347 (size=165)
2024-12-08T04:28:49,168 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742171_1347 (size=165)
2024-12-08T04:28:49,169 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:49,169 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd}]
2024-12-08T04:28:49,170 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:49,170 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:49,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-12-08T04:28:49,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:49,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:49,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145
2024-12-08T04:28:49,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146
2024-12-08T04:28:49,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:49,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:49,322 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing d014bceee65678ef1e8d4fae8969b87e 1/1 column families, dataSize=199 B heapSize=688 B
2024-12-08T04:28:49,322 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing f790403a55ef7cc492738ed15e40d4cd 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB
2024-12-08T04:28:49,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/.tmp/cf/e8f89453e92d4ebb80ce3e279c1f4a73 is 71, key is 052d451862812762328a7b48d6f0b2e7/cf:q/1733632129120/Put/seqid=0
2024-12-08T04:28:49,356 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/.tmp/cf/e19aab93752e4b15a0e83825192a375c is 71, key is 1767a9c2cec513d268dac1ec7216ba8b/cf:q/1733632129121/Put/seqid=0
2024-12-08T04:28:49,366 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742172_1348 (size=5288)
2024-12-08T04:28:49,367 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742173_1349 (size=8324)
2024-12-08T04:28:49,367 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742172_1348 (size=5288)
2024-12-08T04:28:49,367 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742172_1348 (size=5288)
2024-12-08T04:28:49,368 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/.tmp/cf/e8f89453e92d4ebb80ce3e279c1f4a73
2024-12-08T04:28:49,368 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742173_1349 (size=8324)
2024-12-08T04:28:49,369 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742173_1349 (size=8324)
2024-12-08T04:28:49,369 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/.tmp/cf/e19aab93752e4b15a0e83825192a375c
2024-12-08T04:28:49,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/.tmp/cf/e8f89453e92d4ebb80ce3e279c1f4a73 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73
2024-12-08T04:28:49,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/.tmp/cf/e19aab93752e4b15a0e83825192a375c as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c
2024-12-08T04:28:49,382 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c, entries=47, sequenceid=6, filesize=8.1 K
2024-12-08T04:28:49,383 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for f790403a55ef7cc492738ed15e40d4cd in 61ms, sequenceid=6, compaction requested=false
2024-12-08T04:28:49,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot'
2024-12-08T04:28:49,383 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73, entries=3, sequenceid=6, filesize=5.2 K
2024-12-08T04:28:49,384 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d014bceee65678ef1e8d4fae8969b87e in 62ms, sequenceid=6, compaction requested=false
2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for f790403a55ef7cc492738ed15e40d4cd:

2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for d014bceee65678ef1e8d4fae8969b87e:

2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. for snaptb0-testExportExpiredSnapshot completed.
2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. for snaptb0-testExportExpiredSnapshot completed.
2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.' region-info for snapshot=snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:49,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c] hfiles
2024-12-08T04:28:49,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c for snapshot=snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:49,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73] hfiles
2024-12-08T04:28:49,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73 for snapshot=snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,394 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742174_1350 (size=110)
2024-12-08T04:28:49,395 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742174_1350 (size=110)
2024-12-08T04:28:49,395 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742174_1350 (size=110)
2024-12-08T04:28:49,396 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:28:49,396 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146
2024-12-08T04:28:49,396 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742175_1351 (size=110)
2024-12-08T04:28:49,397 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742175_1351 (size=110)
2024-12-08T04:28:49,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:28:49,397 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742175_1351 (size=110)
2024-12-08T04:28:49,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145
2024-12-08T04:28:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=146
2024-12-08T04:28:49,397 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:49,397 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:28:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=145
2024-12-08T04:28:49,398 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:49,398 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:28:49,400 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure f790403a55ef7cc492738ed15e40d4cd in 230 msec
2024-12-08T04:28:49,401 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144
2024-12-08T04:28:49,401 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:49,401 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure d014bceee65678ef1e8d4fae8969b87e in 230 msec
2024-12-08T04:28:49,402 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:49,402 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:49,403 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,403 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,417 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742176_1352 (size=630)
2024-12-08T04:28:49,417 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742176_1352 (size=630)
2024-12-08T04:28:49,418 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742176_1352 (size=630)
2024-12-08T04:28:49,421 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:49,430 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:49,430 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportExpiredSnapshot
2024-12-08T04:28:49,432 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:49,432 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144
2024-12-08T04:28:49,433 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 277 msec
2024-12-08T04:28:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-12-08T04:28:49,461 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed
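For reference, the snapshot completed above (pid=144, Operation: SNAPSHOT) is the kind of request a client issues through the HBase Admin API. A minimal Java sketch, with connection setup left to defaults and only the snapshot and table names taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (seen above as pid=144,
      // SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION) reports completion.
      admin.snapshot("snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}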
2024-12-08T04:28:49,462 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:28:49,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot
2024-12-08T04:28:49,464 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:28:49,464 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:49,464 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testExportExpiredSnapshot"
 procId is: 147
2024-12-08T04:28:49,465 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:28:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147
2024-12-08T04:28:49,473 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742177_1353 (size=400)
2024-12-08T04:28:49,474 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742177_1353 (size=400)
2024-12-08T04:28:49,474 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742177_1353 (size=400)
2024-12-08T04:28:49,475 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 430b6d8c8c366152be49a2e6dcaf8f87, NAME => 'testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:49,475 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b0618e5cec1ba295985f16f1dd465d87, NAME => 'testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:28:49,485 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742178_1354 (size=61)
2024-12-08T04:28:49,485 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742178_1354 (size=61)
2024-12-08T04:28:49,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742179_1355 (size=61)
2024-12-08T04:28:49,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742178_1354 (size=61)
2024-12-08T04:28:49,487 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742179_1355 (size=61)
2024-12-08T04:28:49,487 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742179_1355 (size=61)
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 430b6d8c8c366152be49a2e6dcaf8f87, disabling compactions & flushes
2024-12-08T04:28:49,487 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87. after waiting 0 ms
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:49,487 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 430b6d8c8c366152be49a2e6dcaf8f87:

2024-12-08T04:28:49,487 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing b0618e5cec1ba295985f16f1dd465d87, disabling compactions & flushes
2024-12-08T04:28:49,487 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,488 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,488 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87. after waiting 0 ms
2024-12-08T04:28:49,488 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,488 INFO  [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,488 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for b0618e5cec1ba295985f16f1dd465d87:

2024-12-08T04:28:49,489 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:28:49,489 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733632129489"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632129489"}]},"ts":"1733632129489"}
2024-12-08T04:28:49,489 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733632129489"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632129489"}]},"ts":"1733632129489"}
2024-12-08T04:28:49,491 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:28:49,492 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:28:49,492 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632129492"}]},"ts":"1733632129492"}
2024-12-08T04:28:49,496 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta
2024-12-08T04:28:49,499 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:28:49,501 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:28:49,501 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:28:49,501 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:28:49,501 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:28:49,501 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:28:49,501 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:28:49,501 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:28:49,501 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b0618e5cec1ba295985f16f1dd465d87, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=430b6d8c8c366152be49a2e6dcaf8f87, ASSIGN}]
2024-12-08T04:28:49,502 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=430b6d8c8c366152be49a2e6dcaf8f87, ASSIGN
2024-12-08T04:28:49,502 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b0618e5cec1ba295985f16f1dd465d87, ASSIGN
2024-12-08T04:28:49,503 INFO  [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=430b6d8c8c366152be49a2e6dcaf8f87, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:28:49,503 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b0618e5cec1ba295985f16f1dd465d87, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:28:49,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147
2024-12-08T04:28:49,653 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:28:49,654 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=b0618e5cec1ba295985f16f1dd465d87, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:49,654 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=430b6d8c8c366152be49a2e6dcaf8f87, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:49,655 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure b0618e5cec1ba295985f16f1dd465d87, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:28:49,656 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE; OpenRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:28:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147
2024-12-08T04:28:49,807 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:49,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:49,811 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,811 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,811 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => b0618e5cec1ba295985f16f1dd465d87, NAME => 'testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:28:49,811 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 430b6d8c8c366152be49a2e6dcaf8f87, NAME => 'testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:28:49,811 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87. service=AccessControlService
2024-12-08T04:28:49,811 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87. service=AccessControlService
2024-12-08T04:28:49,811 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:49,811 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,812 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,813 INFO  [StoreOpener-430b6d8c8c366152be49a2e6dcaf8f87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 430b6d8c8c366152be49a2e6dcaf8f87 
2024-12-08T04:28:49,813 INFO  [StoreOpener-b0618e5cec1ba295985f16f1dd465d87-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b0618e5cec1ba295985f16f1dd465d87 
2024-12-08T04:28:49,815 INFO  [StoreOpener-430b6d8c8c366152be49a2e6dcaf8f87-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 430b6d8c8c366152be49a2e6dcaf8f87 columnFamilyName cf
2024-12-08T04:28:49,815 INFO  [StoreOpener-b0618e5cec1ba295985f16f1dd465d87-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0618e5cec1ba295985f16f1dd465d87 columnFamilyName cf
2024-12-08T04:28:49,815 DEBUG [StoreOpener-430b6d8c8c366152be49a2e6dcaf8f87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:49,815 DEBUG [StoreOpener-b0618e5cec1ba295985f16f1dd465d87-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:28:49,815 INFO  [StoreOpener-b0618e5cec1ba295985f16f1dd465d87-1 {}] regionserver.HStore(327): Store=b0618e5cec1ba295985f16f1dd465d87/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:49,815 INFO  [StoreOpener-430b6d8c8c366152be49a2e6dcaf8f87-1 {}] regionserver.HStore(327): Store=430b6d8c8c366152be49a2e6dcaf8f87/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,818 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:49,818 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:49,820 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:49,820 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:28:49,821 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 430b6d8c8c366152be49a2e6dcaf8f87; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73376795, jitterRate=0.09339945018291473}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:49,821 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened b0618e5cec1ba295985f16f1dd465d87; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62516363, jitterRate=-0.06843359768390656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:28:49,821 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for b0618e5cec1ba295985f16f1dd465d87:

2024-12-08T04:28:49,821 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 430b6d8c8c366152be49a2e6dcaf8f87:

2024-12-08T04:28:49,822 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87., pid=150, masterSystemTime=1733632129807
2024-12-08T04:28:49,822 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87., pid=151, masterSystemTime=1733632129808
2024-12-08T04:28:49,824 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,824 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:49,824 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=b0618e5cec1ba295985f16f1dd465d87, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:28:49,824 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,824 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:49,825 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=430b6d8c8c366152be49a2e6dcaf8f87, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:28:49,827 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148
2024-12-08T04:28:49,827 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure b0618e5cec1ba295985f16f1dd465d87, server=428ded7e54d6,45955,1733631983994 in 170 msec
2024-12-08T04:28:49,828 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=149
2024-12-08T04:28:49,828 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=149, state=SUCCESS; OpenRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87, server=428ded7e54d6,46421,1733631984115 in 170 msec
2024-12-08T04:28:49,829 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b0618e5cec1ba295985f16f1dd465d87, ASSIGN in 326 msec
2024-12-08T04:28:49,830 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147
2024-12-08T04:28:49,830 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=430b6d8c8c366152be49a2e6dcaf8f87, ASSIGN in 328 msec
2024-12-08T04:28:49,831 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:28:49,831 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632129831"}]},"ts":"1733632129831"}
2024-12-08T04:28:49,832 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta
2024-12-08T04:28:49,834 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:28:49,834 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA
2024-12-08T04:28:49,836 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-12-08T04:28:49,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:49,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:49,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:49,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:28:49,841 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 377 msec
2024-12-08T04:28:50,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147
2024-12-08T04:28:50,069 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed
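The create-table request logged at 04:28:49,462 (pid=147) corresponds to a client-side Admin.createTable call. A minimal Java sketch, assuming default column-family settings apart from VERSIONS => '1' and a single split key '1' to produce the two regions seen above; everything else is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the descriptor logged above
              .build())
          .build();
      // One split key at '1' yields the two regions ['', '1') and ['1', '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      // Drives the CreateTableProcedure (pid=147) through FS layout, meta update
      // and region assignment before returning.
      admin.createTable(desc, splitKeys);
    }
  }
}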
2024-12-08T04:28:50,069 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms
2024-12-08T04:28:50,069 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:50,072 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states.
2024-12-08T04:28:50,072 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:50,072 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned.
2024-12-08T04:28:50,079 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:28:50,080 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87. with WAL disabled. Data may be lost in the event of a crash.
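The two warnings above about writing with the WAL disabled are what the region server logs when a mutation arrives with durability SKIP_WAL. A minimal client-side Java sketch (row, qualifier and value are illustrative, not taken from this run):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log: faster, but as the server warns above, data
      // written this way is lost if the region server crashes before a flush.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}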
2024-12-08T04:28:50,082 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot
2024-12-08T04:28:50,082 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:50,082 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:28:50,090 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }
2024-12-08T04:28:50,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2
2024-12-08T04:28:50,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:28:50,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c94bda9 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2890a35c
2024-12-08T04:28:50,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1869eb34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:50,096 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:50,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c94bda9 to 127.0.0.1:55878
2024-12-08T04:28:50,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:50,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x318fdc84 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ca813aa
2024-12-08T04:28:50,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bcf7b40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:28:50,102 DEBUG [hconnection-0x397cea21-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:28:50,103 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:28:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x318fdc84 to 127.0.0.1:55878
2024-12-08T04:28:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:28:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA]
2024-12-08T04:28:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:28:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }
2024-12-08T04:28:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152
2024-12-08T04:28:50,107 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:28:50,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-12-08T04:28:50,107 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:28:50,109 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:28:50,114 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742180_1356 (size=152)
2024-12-08T04:28:50,114 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742180_1356 (size=152)
2024-12-08T04:28:50,115 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742180_1356 (size=152)
2024-12-08T04:28:50,116 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:28:50,116 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure b0618e5cec1ba295985f16f1dd465d87}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87}]
2024-12-08T04:28:50,117 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:50,117 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:50,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-12-08T04:28:50,222 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot'
2024-12-08T04:28:50,267 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:28:50,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:28:50,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154
2024-12-08T04:28:50,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153
2024-12-08T04:28:50,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:50,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:50,268 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing b0618e5cec1ba295985f16f1dd465d87 1/1 column families, dataSize=266 B heapSize=832 B
2024-12-08T04:28:50,269 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 430b6d8c8c366152be49a2e6dcaf8f87 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB
2024-12-08T04:28:50,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/.tmp/cf/86fb103603ae425eb236025fb5707524 is 71, key is 066ded1676b8da069bbc259df578a5b3/cf:q/1733632130079/Put/seqid=0
2024-12-08T04:28:50,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/.tmp/cf/239ffa3a8677459db1b2e33f7972d0d6 is 71, key is 132358a0a42c349510dc585e4921ca35/cf:q/1733632130080/Put/seqid=0
2024-12-08T04:28:50,293 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742182_1358 (size=8258)
2024-12-08T04:28:50,293 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742182_1358 (size=8258)
2024-12-08T04:28:50,293 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742182_1358 (size=8258)
2024-12-08T04:28:50,294 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/.tmp/cf/239ffa3a8677459db1b2e33f7972d0d6
2024-12-08T04:28:50,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/.tmp/cf/239ffa3a8677459db1b2e33f7972d0d6 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/cf/239ffa3a8677459db1b2e33f7972d0d6
2024-12-08T04:28:50,303 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742181_1357 (size=5356)
2024-12-08T04:28:50,303 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/cf/239ffa3a8677459db1b2e33f7972d0d6, entries=46, sequenceid=5, filesize=8.1 K
2024-12-08T04:28:50,304 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742181_1357 (size=5356)
2024-12-08T04:28:50,304 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742181_1357 (size=5356)
2024-12-08T04:28:50,305 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 430b6d8c8c366152be49a2e6dcaf8f87 in 37ms, sequenceid=5, compaction requested=false
2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 430b6d8c8c366152be49a2e6dcaf8f87:

2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87. for snapshot-testExportExpiredSnapshot completed.
2024-12-08T04:28:50,305 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/.tmp/cf/86fb103603ae425eb236025fb5707524
2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.' region-info for snapshot=snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/cf/239ffa3a8677459db1b2e33f7972d0d6] hfiles
2024-12-08T04:28:50,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/cf/239ffa3a8677459db1b2e33f7972d0d6 for snapshot=snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/.tmp/cf/86fb103603ae425eb236025fb5707524 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/cf/86fb103603ae425eb236025fb5707524
2024-12-08T04:28:50,312 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742183_1359 (size=103)
2024-12-08T04:28:50,312 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742183_1359 (size=103)
2024-12-08T04:28:50,313 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742183_1359 (size=103)
2024-12-08T04:28:50,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:28:50,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154
2024-12-08T04:28:50,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=154
2024-12-08T04:28:50,314 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:50,314 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:28:50,315 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/cf/86fb103603ae425eb236025fb5707524, entries=4, sequenceid=5, filesize=5.2 K
2024-12-08T04:28:50,316 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 430b6d8c8c366152be49a2e6dcaf8f87 in 199 msec
2024-12-08T04:28:50,316 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for b0618e5cec1ba295985f16f1dd465d87 in 48ms, sequenceid=5, compaction requested=false
2024-12-08T04:28:50,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for b0618e5cec1ba295985f16f1dd465d87:

2024-12-08T04:28:50,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87. for snapshot-testExportExpiredSnapshot completed.
2024-12-08T04:28:50,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.' region-info for snapshot=snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:28:50,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/cf/86fb103603ae425eb236025fb5707524] hfiles
2024-12-08T04:28:50,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/cf/86fb103603ae425eb236025fb5707524 for snapshot=snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,322 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742184_1360 (size=103)
2024-12-08T04:28:50,322 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742184_1360 (size=103)
2024-12-08T04:28:50,323 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742184_1360 (size=103)
2024-12-08T04:28:50,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:28:50,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153
2024-12-08T04:28:50,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=153
2024-12-08T04:28:50,323 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:50,324 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:28:50,326 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152
2024-12-08T04:28:50,326 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:28:50,326 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure b0618e5cec1ba295985f16f1dd465d87 in 208 msec
2024-12-08T04:28:50,327 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:28:50,327 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:28:50,327 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,328 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,338 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742185_1361 (size=609)
2024-12-08T04:28:50,338 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742185_1361 (size=609)
2024-12-08T04:28:50,339 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742185_1361 (size=609)
2024-12-08T04:28:50,346 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:28:50,351 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:28:50,352 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snapshot-testExportExpiredSnapshot
2024-12-08T04:28:50,353 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:28:50,353 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152
2024-12-08T04:28:50,354 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 248 msec
2024-12-08T04:28:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-12-08T04:28:50,409 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed
2024-12-08T04:28:51,767 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0006_000001 (auth:SIMPLE) from 127.0.0.1:60456
2024-12-08T04:28:51,778 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000001/launch_container.sh]
2024-12-08T04:28:51,778 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000001/container_tokens]
2024-12-08T04:28:51,778 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0006/container_1733631992429_0006_01_000001/sysfs]
2024-12-08T04:28:52,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:28:52,994 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:28:53,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-12-08T04:28:53,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer
2024-12-08T04:28:53,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot
2024-12-08T04:28:53,633 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer
2024-12-08T04:28:53,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1
2024-12-08T04:28:53,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion
2024-12-08T04:28:59,135 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:29:00,417 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632140417
2024-12-08T04:29:00,417 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632140417, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632140417, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:00,456 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:00,456 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632140417, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632140417/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-08T04:29:00,458 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:29:00,459 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?]
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?]
	at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
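The SnapshotTTLExpiredException above is the expected outcome of this test: the snapshot was requested at roughly 04:28:50,090 with ttl=10 (seconds), and ExportSnapshot only began verifying it at 04:29:00,458, a little over 10 seconds later, so the TTL check treats it as expired. Below is a minimal, self-contained sketch of that arithmetic (an approximation of the check, not the exact HBase implementation), using the epoch timestamps visible in the log.

```java
import java.util.concurrent.TimeUnit;

public class SnapshotTtlExpirySketch {
  // Approximate expiry rule: a snapshot with a positive TTL is considered expired
  // once creationTime + TTL (converted to milliseconds) falls before the current time.
  static boolean isExpired(long ttlSeconds, long createdTimeMillis, long nowMillis) {
    if (ttlSeconds <= 0 || createdTimeMillis <= 0) {
      return false; // non-positive TTL or unknown creation time: treated as never expiring
    }
    return createdTimeMillis + TimeUnit.SECONDS.toMillis(ttlSeconds) < nowMillis;
  }

  public static void main(String[] args) {
    long created = 1733632130090L; // ~2024-12-08T04:28:50,090 (snapshot request in the log)
    long now     = 1733632140458L; // ~2024-12-08T04:29:00,458 (when verification ran)
    // 10,368 ms elapsed > 10,000 ms TTL, so this prints "true".
    System.out.println(isExpired(10, created, now));
  }
}
```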
2024-12-08T04:29:00,460 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,460 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-08T04:29:00,463 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632140463"}]},"ts":"1733632140463"}
2024-12-08T04:29:00,464 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta
2024-12-08T04:29:00,466 INFO  [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING
2024-12-08T04:29:00,467 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}]
2024-12-08T04:29:00,468 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, UNASSIGN}]
2024-12-08T04:29:00,468 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, UNASSIGN
2024-12-08T04:29:00,468 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, UNASSIGN
2024-12-08T04:29:00,469 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=f790403a55ef7cc492738ed15e40d4cd, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:00,469 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=d014bceee65678ef1e8d4fae8969b87e, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:29:00,470 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:29:00,470 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure f790403a55ef7cc492738ed15e40d4cd, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:29:00,471 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:29:00,471 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure d014bceee65678ef1e8d4fae8969b87e, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:29:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-08T04:29:00,622 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:00,622 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:29:00,622 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:29:00,622 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing f790403a55ef7cc492738ed15e40d4cd, disabling compactions & flushes
2024-12-08T04:29:00,622 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:29:00,622 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:29:00,622 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd. after waiting 0 ms
2024-12-08T04:29:00,622 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:29:00,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:29:00,623 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:29:00,623 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:29:00,623 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing d014bceee65678ef1e8d4fae8969b87e, disabling compactions & flushes
2024-12-08T04:29:00,623 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:29:00,623 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:29:00,624 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e. after waiting 0 ms
2024-12-08T04:29:00,624 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:29:00,627 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:29:00,627 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:29:00,627 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.
2024-12-08T04:29:00,627 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for f790403a55ef7cc492738ed15e40d4cd:

2024-12-08T04:29:00,627 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:29:00,628 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:29:00,628 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.
2024-12-08T04:29:00,628 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for d014bceee65678ef1e8d4fae8969b87e:

2024-12-08T04:29:00,629 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:29:00,629 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=f790403a55ef7cc492738ed15e40d4cd, regionState=CLOSED
2024-12-08T04:29:00,630 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:29:00,630 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=d014bceee65678ef1e8d4fae8969b87e, regionState=CLOSED
2024-12-08T04:29:00,632 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158
2024-12-08T04:29:00,633 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure f790403a55ef7cc492738ed15e40d4cd, server=428ded7e54d6,45955,1733631983994 in 161 msec
2024-12-08T04:29:00,633 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157
2024-12-08T04:29:00,633 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure d014bceee65678ef1e8d4fae8969b87e, server=428ded7e54d6,46421,1733631984115 in 161 msec
2024-12-08T04:29:00,633 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f790403a55ef7cc492738ed15e40d4cd, UNASSIGN in 164 msec
2024-12-08T04:29:00,635 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156
2024-12-08T04:29:00,635 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d014bceee65678ef1e8d4fae8969b87e, UNASSIGN in 166 msec
2024-12-08T04:29:00,637 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155
2024-12-08T04:29:00,637 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 170 msec
2024-12-08T04:29:00,638 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632140638"}]},"ts":"1733632140638"}
2024-12-08T04:29:00,639 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta
2024-12-08T04:29:00,641 INFO  [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED
2024-12-08T04:29:00,642 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 181 msec
2024-12-08T04:29:00,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-08T04:29:00,765 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed
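After the expired-export attempt, the test tears down the helper table. A minimal sketch of the client-side calls behind the DISABLE procedure just completed (pid=155) and the DELETE procedure that follows (pid=161); connection setup is assumed and illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableDeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
      admin.disableTable(tn); // drives a DisableTableProcedure like pid=155 above
      admin.deleteTable(tn);  // drives a DeleteTableProcedure like pid=161 below
    }
  }
}
```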
2024-12-08T04:29:00,765 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,767 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,767 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,769 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,770 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:29:00,771 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:29:00,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,772 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF
2024-12-08T04:29:00,773 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/recovered.edits]
2024-12-08T04:29:00,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF
2024-12-08T04:29:00,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF
2024-12-08T04:29:00,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF
2024-12-08T04:29:00,774 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/recovered.edits]
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:00,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:00,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:00,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161
2024-12-08T04:29:00,776 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:00,776 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:00,776 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:00,779 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/cf/e19aab93752e4b15a0e83825192a375c
2024-12-08T04:29:00,779 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/cf/e8f89453e92d4ebb80ce3e279c1f4a73
2024-12-08T04:29:00,782 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd/recovered.edits/9.seqid
2024-12-08T04:29:00,782 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e/recovered.edits/9.seqid
2024-12-08T04:29:00,782 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/f790403a55ef7cc492738ed15e40d4cd
2024-12-08T04:29:00,783 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportExpiredSnapshot/d014bceee65678ef1e8d4fae8969b87e
2024-12-08T04:29:00,783 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions
2024-12-08T04:29:00,784 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,787 WARN  [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta
2024-12-08T04:29:00,789 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor.
2024-12-08T04:29:00,790 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,790 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states.
2024-12-08T04:29:00,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632140790"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:00,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632140790"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:00,792 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:29:00,792 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d014bceee65678ef1e8d4fae8969b87e, NAME => 'testtb-testExportExpiredSnapshot,,1733632128161.d014bceee65678ef1e8d4fae8969b87e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f790403a55ef7cc492738ed15e40d4cd, NAME => 'testtb-testExportExpiredSnapshot,1,1733632128161.f790403a55ef7cc492738ed15e40d4cd.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:29:00,792 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted.
2024-12-08T04:29:00,792 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632140792"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:00,794 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META
2024-12-08T04:29:00,796 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-08T04:29:00,797 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 31 msec
2024-12-08T04:29:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161
2024-12-08T04:29:00,877 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed
2024-12-08T04:29:00,885 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot"
2024-12-08T04:29:00,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot
2024-12-08T04:29:00,888 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot"
2024-12-08T04:29:00,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot
2024-12-08T04:29:00,890 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot"
2024-12-08T04:29:00,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot
2024-12-08T04:29:00,912 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=797 (was 802), OpenFileDescriptor=799 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=547 (was 628), ProcessCount=11 (was 20), AvailableMemoryMB=3994 (was 3110) - AvailableMemoryMB LEAK? -
2024-12-08T04:29:00,912 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500
2024-12-08T04:29:00,932 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=797, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=547, ProcessCount=11, AvailableMemoryMB=3994
2024-12-08T04:29:00,932 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500
2024-12-08T04:29:00,933 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:29:00,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:00,935 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:29:00,935 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:00,935 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testEmptyExportFileSystemState"
 procId is: 162
2024-12-08T04:29:00,936 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:29:00,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162
2024-12-08T04:29:00,942 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742186_1362 (size=412)
2024-12-08T04:29:00,942 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742186_1362 (size=412)
2024-12-08T04:29:00,943 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742186_1362 (size=412)
2024-12-08T04:29:00,944 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3281c8447ff6276b158185802c4fc6ee, NAME => 'testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:00,945 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a3e4f54a2e08ec42b9cce80612e0e7d2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:00,951 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742188_1364 (size=73)
2024-12-08T04:29:00,951 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742188_1364 (size=73)
2024-12-08T04:29:00,952 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742188_1364 (size=73)
2024-12-08T04:29:00,952 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742187_1363 (size=73)
2024-12-08T04:29:00,952 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:00,952 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 3281c8447ff6276b158185802c4fc6ee, disabling compactions & flushes
2024-12-08T04:29:00,952 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:00,952 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. after waiting 0 ms
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:00,953 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742187_1363 (size=73)
2024-12-08T04:29:00,953 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3281c8447ff6276b158185802c4fc6ee:
2024-12-08T04:29:00,953 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742187_1363 (size=73)
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing a3e4f54a2e08ec42b9cce80612e0e7d2, disabling compactions & flushes
2024-12-08T04:29:00,953 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:00,953 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:00,954 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. after waiting 0 ms
2024-12-08T04:29:00,954 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:00,954 INFO  [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:00,954 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for a3e4f54a2e08ec42b9cce80612e0e7d2:
2024-12-08T04:29:00,954 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:29:00,955 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733632140954"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632140954"}]},"ts":"1733632140954"}
2024-12-08T04:29:00,955 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733632140954"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632140954"}]},"ts":"1733632140954"}
2024-12-08T04:29:00,957 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:29:00,957 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:29:00,958 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632140957"}]},"ts":"1733632140957"}
2024-12-08T04:29:00,959 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta
2024-12-08T04:29:00,962 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:29:00,963 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:29:00,963 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:29:00,963 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:29:00,963 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:29:00,963 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:29:00,963 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:29:00,963 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:29:00,963 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, ASSIGN}]
2024-12-08T04:29:00,964 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, ASSIGN
2024-12-08T04:29:00,964 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, ASSIGN
2024-12-08T04:29:00,965 INFO  [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:29:00,965 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:29:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162
2024-12-08T04:29:01,115 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:29:01,115 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a3e4f54a2e08ec42b9cce80612e0e7d2, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:29:01,115 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3281c8447ff6276b158185802c4fc6ee, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:01,117 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:29:01,118 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=163, state=RUNNABLE; OpenRegionProcedure 3281c8447ff6276b158185802c4fc6ee, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:29:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162
2024-12-08T04:29:01,268 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:29:01,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:01,271 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => a3e4f54a2e08ec42b9cce80612e0e7d2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:29:01,272 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 3281c8447ff6276b158185802c4fc6ee, NAME => 'testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. service=AccessControlService
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. service=AccessControlService
2024-12-08T04:29:01,272 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:29:01,272 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,272 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:01,273 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,273 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,274 INFO  [StoreOpener-a3e4f54a2e08ec42b9cce80612e0e7d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a3e4f54a2e08ec42b9cce80612e0e7d2 
2024-12-08T04:29:01,274 INFO  [StoreOpener-3281c8447ff6276b158185802c4fc6ee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3281c8447ff6276b158185802c4fc6ee 
2024-12-08T04:29:01,275 INFO  [StoreOpener-3281c8447ff6276b158185802c4fc6ee-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3281c8447ff6276b158185802c4fc6ee columnFamilyName cf
2024-12-08T04:29:01,275 DEBUG [StoreOpener-3281c8447ff6276b158185802c4fc6ee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:01,275 INFO  [StoreOpener-a3e4f54a2e08ec42b9cce80612e0e7d2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a3e4f54a2e08ec42b9cce80612e0e7d2 columnFamilyName cf
2024-12-08T04:29:01,275 DEBUG [StoreOpener-a3e4f54a2e08ec42b9cce80612e0e7d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:01,275 INFO  [StoreOpener-3281c8447ff6276b158185802c4fc6ee-1 {}] regionserver.HStore(327): Store=3281c8447ff6276b158185802c4fc6ee/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:29:01,275 INFO  [StoreOpener-a3e4f54a2e08ec42b9cce80612e0e7d2-1 {}] regionserver.HStore(327): Store=a3e4f54a2e08ec42b9cce80612e0e7d2/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:29:01,276 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,276 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,277 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,277 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,278 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,279 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,280 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:29:01,281 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:29:01,281 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 3281c8447ff6276b158185802c4fc6ee; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73565975, jitterRate=0.09621845185756683}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:29:01,281 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened a3e4f54a2e08ec42b9cce80612e0e7d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71636830, jitterRate=0.0674719512462616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:29:01,281 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 3281c8447ff6276b158185802c4fc6ee:
2024-12-08T04:29:01,281 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for a3e4f54a2e08ec42b9cce80612e0e7d2:
2024-12-08T04:29:01,282 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee., pid=166, masterSystemTime=1733632141269
2024-12-08T04:29:01,282 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2., pid=165, masterSystemTime=1733632141268
2024-12-08T04:29:01,284 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:01,284 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:01,284 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3281c8447ff6276b158185802c4fc6ee, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:01,284 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:01,284 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:01,285 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a3e4f54a2e08ec42b9cce80612e0e7d2, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:29:01,288 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=163
2024-12-08T04:29:01,288 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=163, state=SUCCESS; OpenRegionProcedure 3281c8447ff6276b158185802c4fc6ee, server=428ded7e54d6,45955,1733631983994 in 169 msec
2024-12-08T04:29:01,289 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164
2024-12-08T04:29:01,289 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, ASSIGN in 325 msec
2024-12-08T04:29:01,289 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2, server=428ded7e54d6,41743,1733631984189 in 170 msec
2024-12-08T04:29:01,290 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162
2024-12-08T04:29:01,290 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, ASSIGN in 326 msec
2024-12-08T04:29:01,291 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:29:01,291 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632141291"}]},"ts":"1733632141291"}
2024-12-08T04:29:01,292 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta
2024-12-08T04:29:01,294 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:29:01,294 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA
2024-12-08T04:29:01,296 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:29:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:01,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:01,305 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 370 msec
2024-12-08T04:29:01,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162
2024-12-08T04:29:01,539 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed
2024-12-08T04:29:01,539 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms
2024-12-08T04:29:01,539 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:29:01,542 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states.
2024-12-08T04:29:01,542 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:29:01,542 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned.
2024-12-08T04:29:01,544 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:29:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632141544 (current time:1733632141544).
2024-12-08T04:29:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:29:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2
2024-12-08T04:29:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:29:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1d906d to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@171cef5f
2024-12-08T04:29:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f455e67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:01,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:01,550 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:01,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1d906d to 127.0.0.1:55878
2024-12-08T04:29:01,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x21e12c2d to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51775959
2024-12-08T04:29:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44549376, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:01,556 DEBUG [hconnection-0x4fd4ac4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:01,556 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:01,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x21e12c2d to 127.0.0.1:55878
2024-12-08T04:29:01,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:01,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:29:01,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:29:01,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:29:01,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167
2024-12-08T04:29:01,561 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:29:01,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-08T04:29:01,561 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:29:01,563 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:29:01,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742189_1365 (size=185)
2024-12-08T04:29:01,568 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742189_1365 (size=185)
2024-12-08T04:29:01,569 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742189_1365 (size=185)
2024-12-08T04:29:01,569 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:29:01,570 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2}]
2024-12-08T04:29:01,570 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,570 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-08T04:29:01,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:01,721 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:29:01,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169
2024-12-08T04:29:01,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for a3e4f54a2e08ec42b9cce80612e0e7d2:
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. for emptySnaptb0-testEmptyExportFileSystemState completed.
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3281c8447ff6276b158185802c4fc6ee:
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. for emptySnaptb0-testEmptyExportFileSystemState completed.
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:01,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:29:01,731 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742191_1367 (size=76)
2024-12-08T04:29:01,731 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742190_1366 (size=76)
2024-12-08T04:29:01,731 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742191_1367 (size=76)
2024-12-08T04:29:01,732 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742190_1366 (size=76)
2024-12-08T04:29:01,732 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742191_1367 (size=76)
2024-12-08T04:29:01,732 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742190_1366 (size=76)
2024-12-08T04:29:01,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:01,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168
2024-12-08T04:29:01,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=168
2024-12-08T04:29:01,732 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,733 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:01,734 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee in 164 msec
2024-12-08T04:29:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-08T04:29:02,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:02,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169
2024-12-08T04:29:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=169
2024-12-08T04:29:02,132 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:02,132 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:02,135 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=167
2024-12-08T04:29:02,135 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2 in 564 msec
2024-12-08T04:29:02,135 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:29:02,135 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:29:02,136 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:29:02,136 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,137 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,144 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742192_1368 (size=567)
2024-12-08T04:29:02,144 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742192_1368 (size=567)
2024-12-08T04:29:02,144 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742192_1368 (size=567)
2024-12-08T04:29:02,146 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:29:02,149 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:29:02,150 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,152 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:29:02,152 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167
2024-12-08T04:29:02,153 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 593 msec
2024-12-08T04:29:02,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-08T04:29:02,163 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed
2024-12-08T04:29:02,169 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:29:02,170 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:29:02,173 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState
2024-12-08T04:29:02,173 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:02,173 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
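The snapshot request that follows is the same kind of FLUSH-type snapshot the preceding procedure (pid=167) just completed, driven from the client side. A minimal sketch of issuing it through the standard HBase 2.x Admin API is shown here for reference; it assumes an hbase-client on the classpath with a reachable cluster, the wrapper class name SnapshotSketch is made up, and the snapshot and table names are taken from the log lines below:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Connect using whatever hbase-site.xml is on the classpath (assumption).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, matching the "type=FLUSH" snapshot descriptions logged by the master.
          admin.snapshot("snaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

Admin.snapshot is synchronous: it returns only after the master's SnapshotProcedure reaches SUCCESS, which is what the repeated "Checking to see if procedure is done" polling lines in this log correspond to.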
2024-12-08T04:29:02,183 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:29:02,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632142183 (current time:1733632142183).
2024-12-08T04:29:02,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:29:02,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2
2024-12-08T04:29:02,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:29:02,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c85bc39 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@659c320
2024-12-08T04:29:02,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2afb0b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:02,190 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:02,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c85bc39 to 127.0.0.1:55878
2024-12-08T04:29:02,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:02,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df19746 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5579071b
2024-12-08T04:29:02,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a2c3710, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:02,195 DEBUG [hconnection-0x526416d0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:02,196 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:02,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df19746 to 127.0.0.1:55878
2024-12-08T04:29:02,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:02,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]
2024-12-08T04:29:02,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:29:02,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-08T04:29:02,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170
2024-12-08T04:29:02,201 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:29:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170
2024-12-08T04:29:02,201 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:29:02,203 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:29:02,209 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742193_1369 (size=180)
2024-12-08T04:29:02,209 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742193_1369 (size=180)
2024-12-08T04:29:02,209 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742193_1369 (size=180)
2024-12-08T04:29:02,210 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:29:02,210 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2}]
2024-12-08T04:29:02,211 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:02,211 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:02,222 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState'
2024-12-08T04:29:02,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170
2024-12-08T04:29:02,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:29:02,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:02,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172
2024-12-08T04:29:02,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171
2024-12-08T04:29:02,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:02,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:02,363 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 3281c8447ff6276b158185802c4fc6ee 1/1 column families, dataSize=132 B heapSize=544 B
2024-12-08T04:29:02,363 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing a3e4f54a2e08ec42b9cce80612e0e7d2 1/1 column families, dataSize=3.13 KB heapSize=7 KB
2024-12-08T04:29:02,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/.tmp/cf/8af06ebfefad4612b21117e6d09f476e is 71, key is 084c96c34e36f165c184515c5e6a3e27/cf:q/1733632142169/Put/seqid=0
2024-12-08T04:29:02,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/.tmp/cf/8170fcb224bf489aa543b88f20dc033a is 71, key is 116dad391e5cfad676d859f5aa862ecc/cf:q/1733632142170/Put/seqid=0
2024-12-08T04:29:02,386 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742195_1371 (size=5216)
2024-12-08T04:29:02,386 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742194_1370 (size=8392)
2024-12-08T04:29:02,386 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742195_1371 (size=5216)
2024-12-08T04:29:02,387 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742195_1371 (size=5216)
2024-12-08T04:29:02,387 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742194_1370 (size=8392)
2024-12-08T04:29:02,387 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/.tmp/cf/8af06ebfefad4612b21117e6d09f476e
2024-12-08T04:29:02,387 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742194_1370 (size=8392)
2024-12-08T04:29:02,388 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/.tmp/cf/8170fcb224bf489aa543b88f20dc033a
2024-12-08T04:29:02,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/.tmp/cf/8af06ebfefad4612b21117e6d09f476e as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e
2024-12-08T04:29:02,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/.tmp/cf/8170fcb224bf489aa543b88f20dc033a as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a
2024-12-08T04:29:02,397 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e, entries=2, sequenceid=6, filesize=5.1 K
2024-12-08T04:29:02,398 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a, entries=48, sequenceid=6, filesize=8.2 K
2024-12-08T04:29:02,399 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 3281c8447ff6276b158185802c4fc6ee in 35ms, sequenceid=6, compaction requested=false
2024-12-08T04:29:02,399 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for a3e4f54a2e08ec42b9cce80612e0e7d2 in 35ms, sequenceid=6, compaction requested=false
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 3281c8447ff6276b158185802c4fc6ee:
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for a3e4f54a2e08ec42b9cce80612e0e7d2:
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. for snaptb0-testEmptyExportFileSystemState completed.
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. for snaptb0-testEmptyExportFileSystemState completed.
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a] hfiles
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e] hfiles
2024-12-08T04:29:02,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,409 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742196_1372 (size=115)
2024-12-08T04:29:02,409 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742196_1372 (size=115)
2024-12-08T04:29:02,409 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742196_1372 (size=115)
2024-12-08T04:29:02,409 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:02,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171
2024-12-08T04:29:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=171
2024-12-08T04:29:02,410 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:02,410 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:02,412 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 3281c8447ff6276b158185802c4fc6ee in 201 msec
2024-12-08T04:29:02,416 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742197_1373 (size=115)
2024-12-08T04:29:02,416 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742197_1373 (size=115)
2024-12-08T04:29:02,417 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742197_1373 (size=115)
2024-12-08T04:29:02,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:02,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172
2024-12-08T04:29:02,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=172
2024-12-08T04:29:02,417 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:02,417 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:02,419 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170
2024-12-08T04:29:02,419 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:29:02,419 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2 in 208 msec
2024-12-08T04:29:02,420 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:29:02,420 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:29:02,420 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,421 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,432 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742198_1374 (size=645)
2024-12-08T04:29:02,432 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742198_1374 (size=645)
2024-12-08T04:29:02,433 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742198_1374 (size=645)
2024-12-08T04:29:02,435 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:29:02,440 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:29:02,440 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,442 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:29:02,442 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170
2024-12-08T04:29:02,443 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 243 msec
2024-12-08T04:29:02,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170
2024-12-08T04:29:02,503 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed
2024-12-08T04:29:02,503 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503
2024-12-08T04:29:02,503 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:02,543 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:02,543 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:02,544 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:29:02,548 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
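The manifest copy recorded above is performed by the stock ExportSnapshot tool. As a sketch, the same export can be driven programmatically via ToolRunner; this assumes org.apache.hadoop.hbase.snapshot.ExportSnapshot from hbase-mapreduce, the wrapper class name is made up, and the snapshot name and destination path are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and any referenced hfiles to the target filesystem root.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:41407/user/jenkins/test-data/"
                + "c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503"
        });
        System.exit(rc);
      }
    }

The command-line equivalent is the documented "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-root>" invocation.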
2024-12-08T04:29:02,557 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742199_1375 (size=185)
2024-12-08T04:29:02,557 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742199_1375 (size=185)
2024-12-08T04:29:02,557 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742199_1375 (size=185)
2024-12-08T04:29:02,557 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742200_1376 (size=567)
2024-12-08T04:29:02,558 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742200_1376 (size=567)
2024-12-08T04:29:02,558 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742200_1376 (size=567)
2024-12-08T04:29:02,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:02,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:02,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:02,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-4225957078368909392.jar
2024-12-08T04:29:03,559 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,559 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-17716332786512989451.jar
2024-12-08T04:29:03,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:29:03,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:29:03,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:29:03,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:29:03,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:29:03,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState
2024-12-08T04:29:03,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer
2024-12-08T04:29:03,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:29:03,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:29:03,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:29:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot
2024-12-08T04:29:03,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:29:03,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:03,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:03,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:03,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:03,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:03,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:03,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
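The long run of "For class X, using jar Y" lines above is TableMapReduceUtil resolving the jar that contains each dependency class and attaching it to the export MapReduce job. A minimal sketch of triggering that resolution is given here, assuming hbase-mapreduce on the classpath; the wrapper class and job name are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Finds (or creates) a jar for each required class and adds it to the job's "tmpjars"
        // configuration, which is what produces the DEBUG lines above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }

The addStoredBlock lines that follow correspond to those resolved jars being written into HDFS as part of job submission.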
2024-12-08T04:29:03,687 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742201_1377 (size=127628)
2024-12-08T04:29:03,687 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742201_1377 (size=127628)
2024-12-08T04:29:03,688 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742201_1377 (size=127628)
2024-12-08T04:29:03,701 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742202_1378 (size=2172101)
2024-12-08T04:29:03,701 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742202_1378 (size=2172101)
2024-12-08T04:29:03,701 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742202_1378 (size=2172101)
2024-12-08T04:29:03,718 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742203_1379 (size=213228)
2024-12-08T04:29:03,718 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742203_1379 (size=213228)
2024-12-08T04:29:03,718 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742203_1379 (size=213228)
2024-12-08T04:29:03,733 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742204_1380 (size=1877034)
2024-12-08T04:29:03,733 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742204_1380 (size=1877034)
2024-12-08T04:29:03,734 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742204_1380 (size=1877034)
2024-12-08T04:29:03,748 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742205_1381 (size=533455)
2024-12-08T04:29:03,749 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742205_1381 (size=533455)
2024-12-08T04:29:03,749 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742205_1381 (size=533455)
2024-12-08T04:29:03,775 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742206_1382 (size=6350155)
2024-12-08T04:29:03,775 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742206_1382 (size=6350155)
2024-12-08T04:29:03,776 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742206_1382 (size=6350155)
2024-12-08T04:29:03,803 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742207_1383 (size=7280644)
2024-12-08T04:29:03,803 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742207_1383 (size=7280644)
2024-12-08T04:29:03,803 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742207_1383 (size=7280644)
2024-12-08T04:29:03,829 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742208_1384 (size=4188619)
2024-12-08T04:29:03,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742208_1384 (size=4188619)
2024-12-08T04:29:03,830 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742208_1384 (size=4188619)
2024-12-08T04:29:03,841 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742209_1385 (size=20406)
2024-12-08T04:29:03,841 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742209_1385 (size=20406)
2024-12-08T04:29:03,841 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742209_1385 (size=20406)
2024-12-08T04:29:03,848 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742210_1386 (size=75495)
2024-12-08T04:29:03,849 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742210_1386 (size=75495)
2024-12-08T04:29:03,849 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742210_1386 (size=75495)
2024-12-08T04:29:03,856 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742211_1387 (size=45609)
2024-12-08T04:29:03,856 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742211_1387 (size=45609)
2024-12-08T04:29:03,856 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742211_1387 (size=45609)
2024-12-08T04:29:03,863 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742212_1388 (size=110084)
2024-12-08T04:29:03,863 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742212_1388 (size=110084)
2024-12-08T04:29:03,863 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742212_1388 (size=110084)
2024-12-08T04:29:03,873 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742213_1389 (size=1323991)
2024-12-08T04:29:03,873 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742213_1389 (size=1323991)
2024-12-08T04:29:03,874 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742213_1389 (size=1323991)
2024-12-08T04:29:03,881 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742214_1390 (size=23076)
2024-12-08T04:29:03,881 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742214_1390 (size=23076)
2024-12-08T04:29:03,881 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742214_1390 (size=23076)
2024-12-08T04:29:03,889 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742215_1391 (size=451756)
2024-12-08T04:29:03,889 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742215_1391 (size=451756)
2024-12-08T04:29:03,890 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742215_1391 (size=451756)
2024-12-08T04:29:03,897 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742216_1392 (size=126803)
2024-12-08T04:29:03,897 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742216_1392 (size=126803)
2024-12-08T04:29:03,897 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742216_1392 (size=126803)
2024-12-08T04:29:03,905 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742217_1393 (size=322274)
2024-12-08T04:29:03,905 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742217_1393 (size=322274)
2024-12-08T04:29:03,905 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742217_1393 (size=322274)
2024-12-08T04:29:03,918 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742218_1394 (size=1832290)
2024-12-08T04:29:03,918 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742218_1394 (size=1832290)
2024-12-08T04:29:03,918 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742218_1394 (size=1832290)
2024-12-08T04:29:03,926 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742219_1395 (size=30081)
2024-12-08T04:29:03,926 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742219_1395 (size=30081)
2024-12-08T04:29:03,926 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742219_1395 (size=30081)
2024-12-08T04:29:03,933 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742220_1396 (size=53616)
2024-12-08T04:29:03,933 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742220_1396 (size=53616)
2024-12-08T04:29:03,933 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742220_1396 (size=53616)
2024-12-08T04:29:03,941 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742221_1397 (size=29229)
2024-12-08T04:29:03,941 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742221_1397 (size=29229)
2024-12-08T04:29:03,941 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742221_1397 (size=29229)
2024-12-08T04:29:03,948 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742222_1398 (size=169089)
2024-12-08T04:29:03,948 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742222_1398 (size=169089)
2024-12-08T04:29:03,949 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742222_1398 (size=169089)
2024-12-08T04:29:03,969 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742223_1399 (size=5175431)
2024-12-08T04:29:03,969 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742223_1399 (size=5175431)
2024-12-08T04:29:03,970 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742223_1399 (size=5175431)
2024-12-08T04:29:03,978 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742224_1400 (size=136454)
2024-12-08T04:29:03,978 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742224_1400 (size=136454)
2024-12-08T04:29:03,979 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742224_1400 (size=136454)
2024-12-08T04:29:03,990 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742225_1401 (size=907852)
2024-12-08T04:29:03,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742225_1401 (size=907852)
2024-12-08T04:29:03,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742225_1401 (size=907852)
2024-12-08T04:29:04,013 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742226_1402 (size=3317408)
2024-12-08T04:29:04,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742226_1402 (size=3317408)
2024-12-08T04:29:04,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742226_1402 (size=3317408)
2024-12-08T04:29:04,029 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742227_1403 (size=503880)
2024-12-08T04:29:04,029 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742227_1403 (size=503880)
2024-12-08T04:29:04,029 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742227_1403 (size=503880)
2024-12-08T04:29:04,049 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742228_1404 (size=4695811)
2024-12-08T04:29:04,049 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742228_1404 (size=4695811)
2024-12-08T04:29:04,049 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742228_1404 (size=4695811)
2024-12-08T04:29:04,051 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:29:04,053 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list
2024-12-08T04:29:04,061 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742229_1405 (size=7)
2024-12-08T04:29:04,061 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742229_1405 (size=7)
2024-12-08T04:29:04,061 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742229_1405 (size=7)
2024-12-08T04:29:04,067 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742230_1406 (size=10)
2024-12-08T04:29:04,067 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742230_1406 (size=10)
2024-12-08T04:29:04,068 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742230_1406 (size=10)
2024-12-08T04:29:04,097 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742231_1407 (size=304788)
2024-12-08T04:29:04,097 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742231_1407 (size=304788)
2024-12-08T04:29:04,098 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742231_1407 (size=304788)
2024-12-08T04:29:04,116 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:29:04,116 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:29:04,664 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0007_000001 (auth:SIMPLE) from 127.0.0.1:37206
2024-12-08T04:29:06,130 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:29:10,046 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0007_000001 (auth:SIMPLE) from 127.0.0.1:58600
2024-12-08T04:29:10,270 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742232_1408 (size=350438)
2024-12-08T04:29:10,270 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742232_1408 (size=350438)
2024-12-08T04:29:10,270 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742232_1408 (size=350438)
2024-12-08T04:29:11,162 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742233_1409 (size=8568)
2024-12-08T04:29:11,162 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742233_1409 (size=8568)
2024-12-08T04:29:11,163 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742233_1409 (size=8568)
2024-12-08T04:29:11,196 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742234_1410 (size=460)
2024-12-08T04:29:11,197 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742234_1410 (size=460)
2024-12-08T04:29:11,197 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742234_1410 (size=460)
2024-12-08T04:29:11,234 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742235_1411 (size=8568)
2024-12-08T04:29:11,234 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742235_1411 (size=8568)
2024-12-08T04:29:11,235 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742235_1411 (size=8568)
2024-12-08T04:29:11,260 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742236_1412 (size=350438)
2024-12-08T04:29:11,261 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742236_1412 (size=350438)
2024-12-08T04:29:11,261 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742236_1412 (size=350438)
2024-12-08T04:29:13,244 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:29:13,245 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:29:13,252 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:13,252 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:29:13,253 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:29:13,253 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:13,254 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo
2024-12-08T04:29:13,254 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest
2024-12-08T04:29:13,254 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:13,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo
2024-12-08T04:29:13,255 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632142503/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest
2024-12-08T04:29:13,263 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,264 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-08T04:29:13,267 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632153267"}]},"ts":"1733632153267"}
2024-12-08T04:29:13,269 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta
2024-12-08T04:29:13,271 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING
2024-12-08T04:29:13,272 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}]
2024-12-08T04:29:13,274 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, UNASSIGN}]
2024-12-08T04:29:13,276 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, UNASSIGN
2024-12-08T04:29:13,276 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, UNASSIGN
2024-12-08T04:29:13,281 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=a3e4f54a2e08ec42b9cce80612e0e7d2, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:29:13,281 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3281c8447ff6276b158185802c4fc6ee, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:13,283 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:29:13,283 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE; CloseRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:29:13,284 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:29:13,284 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=175, state=RUNNABLE; CloseRegionProcedure 3281c8447ff6276b158185802c4fc6ee, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:29:13,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-08T04:29:13,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:29:13,435 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing a3e4f54a2e08ec42b9cce80612e0e7d2, disabling compactions & flushes
2024-12-08T04:29:13,436 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:13,436 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2. after waiting 0 ms
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:13,436 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:29:13,436 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 3281c8447ff6276b158185802c4fc6ee, disabling compactions & flushes
2024-12-08T04:29:13,437 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:13,437 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:13,437 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee. after waiting 0 ms
2024-12-08T04:29:13,437 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:13,448 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:29:13,449 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:29:13,450 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.
2024-12-08T04:29:13,450 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for a3e4f54a2e08ec42b9cce80612e0e7d2:

2024-12-08T04:29:13,451 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:29:13,451 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:13,452 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:29:13,452 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.
2024-12-08T04:29:13,452 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 3281c8447ff6276b158185802c4fc6ee:

2024-12-08T04:29:13,452 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=a3e4f54a2e08ec42b9cce80612e0e7d2, regionState=CLOSED
2024-12-08T04:29:13,453 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:13,454 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3281c8447ff6276b158185802c4fc6ee, regionState=CLOSED
2024-12-08T04:29:13,456 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176
2024-12-08T04:29:13,456 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; CloseRegionProcedure a3e4f54a2e08ec42b9cce80612e0e7d2, server=428ded7e54d6,41743,1733631984189 in 170 msec
2024-12-08T04:29:13,458 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=175
2024-12-08T04:29:13,458 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=175, state=SUCCESS; CloseRegionProcedure 3281c8447ff6276b158185802c4fc6ee, server=428ded7e54d6,45955,1733631983994 in 171 msec
2024-12-08T04:29:13,458 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a3e4f54a2e08ec42b9cce80612e0e7d2, UNASSIGN in 182 msec
2024-12-08T04:29:13,459 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=175, resume processing ppid=174
2024-12-08T04:29:13,459 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3281c8447ff6276b158185802c4fc6ee, UNASSIGN in 184 msec
2024-12-08T04:29:13,461 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173
2024-12-08T04:29:13,461 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 188 msec
2024-12-08T04:29:13,462 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632153462"}]},"ts":"1733632153462"}
2024-12-08T04:29:13,464 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta
2024-12-08T04:29:13,466 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED
2024-12-08T04:29:13,467 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 202 msec
2024-12-08T04:29:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-08T04:29:13,569 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed
2024-12-08T04:29:13,570 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,572 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,573 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,574 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,578 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:13,578 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:13,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF
2024-12-08T04:29:13,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF
2024-12-08T04:29:13,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF
2024-12-08T04:29:13,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF
2024-12-08T04:29:13,581 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/recovered.edits]
2024-12-08T04:29:13,581 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/recovered.edits]
2024-12-08T04:29:13,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:13,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:13,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:13,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:13,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-12-08T04:29:13,585 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:13,585 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:13,585 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:13,585 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:13,588 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/cf/8170fcb224bf489aa543b88f20dc033a
2024-12-08T04:29:13,588 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/cf/8af06ebfefad4612b21117e6d09f476e
2024-12-08T04:29:13,591 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2/recovered.edits/9.seqid
2024-12-08T04:29:13,591 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee/recovered.edits/9.seqid
2024-12-08T04:29:13,592 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/a3e4f54a2e08ec42b9cce80612e0e7d2
2024-12-08T04:29:13,592 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testEmptyExportFileSystemState/3281c8447ff6276b158185802c4fc6ee
2024-12-08T04:29:13,592 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions
2024-12-08T04:29:13,600 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,603 WARN  [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta
2024-12-08T04:29:13,606 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor.
2024-12-08T04:29:13,607 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,607 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states.
2024-12-08T04:29:13,607 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632153607"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:13,607 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632153607"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:13,610 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:29:13,611 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3281c8447ff6276b158185802c4fc6ee, NAME => 'testtb-testEmptyExportFileSystemState,,1733632140933.3281c8447ff6276b158185802c4fc6ee.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a3e4f54a2e08ec42b9cce80612e0e7d2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733632140933.a3e4f54a2e08ec42b9cce80612e0e7d2.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:29:13,611 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted.
2024-12-08T04:29:13,611 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632153611"}]},"ts":"9223372036854775807"}
2024-12-08T04:29:13,613 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META
2024-12-08T04:29:13,616 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,618 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 47 msec
2024-12-08T04:29:13,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState
2024-12-08T04:29:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-12-08T04:29:13,685 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed
2024-12-08T04:29:13,691 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState"

2024-12-08T04:29:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:13,695 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState"

2024-12-08T04:29:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState
2024-12-08T04:29:13,723 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=808 (was 797)
Potentially hanging thread: HFileArchiver-18
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:52712 [Waiting for operation #5]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-40
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:45333 from appattempt_1733631992429_0007_000001
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-41
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:35348 [Waiting for operation #5]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_289514005_1 at /127.0.0.1:38322 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-39
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:39090 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-42
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_289514005_1 at /127.0.0.1:55896 [Waiting for operation #2]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:37211 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: ApplicationMasterLauncher #12
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-5680
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37211
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45333
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 2294)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=821 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=537 (was 547), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3292 (was 3994)
2024-12-08T04:29:13,724 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500
2024-12-08T04:29:13,747 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=808, OpenFileDescriptor=821, MaxFileDescriptor=1048576, SystemLoadAverage=537, ProcessCount=17, AvailableMemoryMB=3290
2024-12-08T04:29:13,747 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500
2024-12-08T04:29:13,749 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:29:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:29:13,751 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:29:13,752 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:13,752 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportWithChecksum"
 procId is: 180
2024-12-08T04:29:13,756 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:29:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180
2024-12-08T04:29:13,776 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742237_1413 (size=404)
2024-12-08T04:29:13,777 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742237_1413 (size=404)
2024-12-08T04:29:13,778 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742237_1413 (size=404)
2024-12-08T04:29:13,779 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c0239d354b1f92616e7e26cfd0ea5532, NAME => 'testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:13,780 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6782a3f2d630839ebc670863784f60b1, NAME => 'testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:13,804 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742239_1415 (size=65)
2024-12-08T04:29:13,805 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742239_1415 (size=65)
2024-12-08T04:29:13,805 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742238_1414 (size=65)
2024-12-08T04:29:13,806 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742238_1414 (size=65)
2024-12-08T04:29:13,806 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742239_1415 (size=65)
2024-12-08T04:29:13,807 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742238_1414 (size=65)
2024-12-08T04:29:13,807 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:13,807 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 6782a3f2d630839ebc670863784f60b1, disabling compactions & flushes
2024-12-08T04:29:13,807 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:13,807 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:13,807 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. after waiting 0 ms
2024-12-08T04:29:13,807 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:13,808 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6782a3f2d630839ebc670863784f60b1:

2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing c0239d354b1f92616e7e26cfd0ea5532, disabling compactions & flushes
2024-12-08T04:29:13,808 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. after waiting 0 ms
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:13,808 INFO  [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:13,808 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for c0239d354b1f92616e7e26cfd0ea5532:

2024-12-08T04:29:13,809 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:29:13,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632153809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632153809"}]},"ts":"1733632153809"}
2024-12-08T04:29:13,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733632153809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632153809"}]},"ts":"1733632153809"}
2024-12-08T04:29:13,812 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:29:13,812 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:29:13,813 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632153812"}]},"ts":"1733632153812"}
2024-12-08T04:29:13,814 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta
2024-12-08T04:29:13,817 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:29:13,819 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:29:13,819 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:29:13,819 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:29:13,819 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:29:13,819 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:29:13,819 INFO  [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:29:13,819 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:29:13,819 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, ASSIGN}]
2024-12-08T04:29:13,820 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, ASSIGN
2024-12-08T04:29:13,821 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, ASSIGN
2024-12-08T04:29:13,821 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:29:13,821 INFO  [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, ASSIGN; state=OFFLINE, location=428ded7e54d6,45955,1733631983994; forceNewPlan=false, retain=false
2024-12-08T04:29:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180
2024-12-08T04:29:13,972 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:29:13,972 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=c0239d354b1f92616e7e26cfd0ea5532, regionState=OPENING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:13,972 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=6782a3f2d630839ebc670863784f60b1, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:29:13,974 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 6782a3f2d630839ebc670863784f60b1, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:29:13,975 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure c0239d354b1f92616e7e26cfd0ea5532, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:29:14,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180
2024-12-08T04:29:14,126 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:29:14,127 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:14,130 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,130 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,130 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 6782a3f2d630839ebc670863784f60b1, NAME => 'testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:29:14,130 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => c0239d354b1f92616e7e26cfd0ea5532, NAME => 'testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. service=AccessControlService
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. service=AccessControlService
2024-12-08T04:29:14,131 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:29:14,131 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,131 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,133 INFO  [StoreOpener-c0239d354b1f92616e7e26cfd0ea5532-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c0239d354b1f92616e7e26cfd0ea5532 
2024-12-08T04:29:14,134 INFO  [StoreOpener-c0239d354b1f92616e7e26cfd0ea5532-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c0239d354b1f92616e7e26cfd0ea5532 columnFamilyName cf
2024-12-08T04:29:14,134 DEBUG [StoreOpener-c0239d354b1f92616e7e26cfd0ea5532-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:14,135 INFO  [StoreOpener-c0239d354b1f92616e7e26cfd0ea5532-1 {}] regionserver.HStore(327): Store=c0239d354b1f92616e7e26cfd0ea5532/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:29:14,136 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,136 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,138 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,140 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:29:14,140 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened c0239d354b1f92616e7e26cfd0ea5532; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68140866, jitterRate=0.015378028154373169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:29:14,141 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for c0239d354b1f92616e7e26cfd0ea5532:

2024-12-08T04:29:14,141 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532., pid=184, masterSystemTime=1733632154127
2024-12-08T04:29:14,143 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,143 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=c0239d354b1f92616e7e26cfd0ea5532, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:29:14,143 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,146 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181
2024-12-08T04:29:14,146 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure c0239d354b1f92616e7e26cfd0ea5532, server=428ded7e54d6,45955,1733631983994 in 169 msec
2024-12-08T04:29:14,147 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, ASSIGN in 327 msec
2024-12-08T04:29:14,151 INFO  [StoreOpener-6782a3f2d630839ebc670863784f60b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6782a3f2d630839ebc670863784f60b1 
2024-12-08T04:29:14,153 INFO  [StoreOpener-6782a3f2d630839ebc670863784f60b1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6782a3f2d630839ebc670863784f60b1 columnFamilyName cf
2024-12-08T04:29:14,153 DEBUG [StoreOpener-6782a3f2d630839ebc670863784f60b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:29:14,153 INFO  [StoreOpener-6782a3f2d630839ebc670863784f60b1-1 {}] regionserver.HStore(327): Store=6782a3f2d630839ebc670863784f60b1/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:29:14,154 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,154 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,156 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,158 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:29:14,158 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 6782a3f2d630839ebc670863784f60b1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74254354, jitterRate=0.10647609829902649}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:29:14,158 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 6782a3f2d630839ebc670863784f60b1:

2024-12-08T04:29:14,159 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1., pid=183, masterSystemTime=1733632154126
2024-12-08T04:29:14,160 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,160 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,161 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=6782a3f2d630839ebc670863784f60b1, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:29:14,163 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182
2024-12-08T04:29:14,163 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 6782a3f2d630839ebc670863784f60b1, server=428ded7e54d6,46421,1733631984115 in 188 msec
2024-12-08T04:29:14,166 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=180
2024-12-08T04:29:14,166 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, ASSIGN in 344 msec
2024-12-08T04:29:14,168 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:29:14,168 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632154168"}]},"ts":"1733632154168"}
2024-12-08T04:29:14,169 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta
2024-12-08T04:29:14,172 INFO  [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:29:14,172 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA
2024-12-08T04:29:14,174 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]
2024-12-08T04:29:14,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:14,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:14,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:14,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:29:14,177 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,179 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04
2024-12-08T04:29:14,179 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 429 msec
2024-12-08T04:29:14,223 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum'
2024-12-08T04:29:14,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180
2024-12-08T04:29:14,362 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed
2024-12-08T04:29:14,362 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms
2024-12-08T04:29:14,362 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:29:14,367 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states.
2024-12-08T04:29:14,367 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:29:14,367 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned.
2024-12-08T04:29:14,372 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }
2024-12-08T04:29:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632154372 (current time:1733632154372).
2024-12-08T04:29:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:29:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2
2024-12-08T04:29:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:29:14,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a1be506 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61b75e53
2024-12-08T04:29:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d51790b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:14,379 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:14,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a1be506 to 127.0.0.1:55878
2024-12-08T04:29:14,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:14,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a9ebdc8 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51003f0e
2024-12-08T04:29:14,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@175f9d04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:14,385 DEBUG [hconnection-0x5a6dca2b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:14,386 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:14,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a9ebdc8 to 127.0.0.1:55878
2024-12-08T04:29:14,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:14,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]
2024-12-08T04:29:14,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:29:14,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }
2024-12-08T04:29:14,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185
2024-12-08T04:29:14,391 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:29:14,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185
2024-12-08T04:29:14,392 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:29:14,394 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:29:14,407 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742240_1416 (size=161)
2024-12-08T04:29:14,408 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742240_1416 (size=161)
2024-12-08T04:29:14,408 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742240_1416 (size=161)
2024-12-08T04:29:14,409 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:29:14,410 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1}]
2024-12-08T04:29:14,411 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,411 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185
2024-12-08T04:29:14,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:29:14,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:14,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187
2024-12-08T04:29:14,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186
2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 6782a3f2d630839ebc670863784f60b1:

2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. for emptySnaptb0-testExportWithChecksum completed.
2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.' region-info for snapshot=emptySnaptb0-testExportWithChecksum
2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:14,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for c0239d354b1f92616e7e26cfd0ea5532:

2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. for emptySnaptb0-testExportWithChecksum completed.
2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.' region-info for snapshot=emptySnaptb0-testExportWithChecksum
2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:14,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:29:14,579 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742241_1417 (size=68)
2024-12-08T04:29:14,579 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742241_1417 (size=68)
2024-12-08T04:29:14,579 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742241_1417 (size=68)
2024-12-08T04:29:14,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187
2024-12-08T04:29:14,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=187
2024-12-08T04:29:14,580 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,581 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,586 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1 in 173 msec
2024-12-08T04:29:14,590 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742242_1418 (size=68)
2024-12-08T04:29:14,590 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742242_1418 (size=68)
2024-12-08T04:29:14,591 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742242_1418 (size=68)
2024-12-08T04:29:14,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186
2024-12-08T04:29:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=186
2024-12-08T04:29:14,592 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,592 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,595 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185
2024-12-08T04:29:14,595 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532 in 183 msec
2024-12-08T04:29:14,595 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:29:14,596 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:29:14,597 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:29:14,597 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum
2024-12-08T04:29:14,598 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum
2024-12-08T04:29:14,614 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742243_1419 (size=543)
2024-12-08T04:29:14,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742243_1419 (size=543)
2024-12-08T04:29:14,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742243_1419 (size=543)
2024-12-08T04:29:14,622 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:29:14,627 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:29:14,628 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportWithChecksum
2024-12-08T04:29:14,629 INFO  [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:29:14,629 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185
2024-12-08T04:29:14,630 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 240 msec
2024-12-08T04:29:14,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185
2024-12-08T04:29:14,693 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed
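The "Operation: SNAPSHOT ... procId: 185 completed" line is the client side of the SnapshotProcedure whose states (SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION) appear above. As a rough illustration only, not the test's own code, a FLUSH-type snapshot like this one can be requested through the Admin API roughly as follows (an open Connection is assumed):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Requests the snapshot and blocks until the master-side procedure finishes,
    // which corresponds to the "Checking to see if procedure is done" polling above.
    static void takeSnapshot(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
            admin.snapshot("emptySnaptb0-testExportWithChecksum",
                TableName.valueOf("testtb-testExportWithChecksum"));
        }
    }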
2024-12-08T04:29:14,705 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45955 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:29:14,708 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. with WAL disabled. Data may be lost in the event of a crash.
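The two "writing data to region ... with WAL disabled" warnings come from the test loading rows with write-ahead logging skipped for speed. One illustrative client-side write that produces this durability trade-off (the Table handle and key/value bytes are assumptions, not the test's data):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // SKIP_WAL edits live only in the memstore until a flush, hence the server's
    // "Data may be lost in the event of a crash" warning above.
    static void putWithoutWal(Table table) throws IOException {
        Put put = new Put(Bytes.toBytes("row-0"));                       // illustrative row key
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);
    }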
2024-12-08T04:29:14,714 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum
2024-12-08T04:29:14,714 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,714 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:29:14,731 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }
2024-12-08T04:29:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632154731 (current time:1733632154731).
2024-12-08T04:29:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:29:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2
2024-12-08T04:29:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:29:14,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e96ebae to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2a9e56c5
2024-12-08T04:29:14,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1da91711, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:14,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:14,738 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:14,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e96ebae to 127.0.0.1:55878
2024-12-08T04:29:14,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:14,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x16de6b47 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4eae3f27
2024-12-08T04:29:14,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1beab87c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:29:14,752 DEBUG [hconnection-0x45ecf700-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:29:14,753 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:29:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x16de6b47 to 127.0.0.1:55878
2024-12-08T04:29:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:29:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]
2024-12-08T04:29:14,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:29:14,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }
2024-12-08T04:29:14,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188
2024-12-08T04:29:14,758 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:29:14,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188
2024-12-08T04:29:14,759 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:29:14,762 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:29:14,770 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742244_1420 (size=156)
2024-12-08T04:29:14,771 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742244_1420 (size=156)
2024-12-08T04:29:14,771 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742244_1420 (size=156)
2024-12-08T04:29:14,772 INFO  [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:29:14,772 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1}]
2024-12-08T04:29:14,773 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,773 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188
2024-12-08T04:29:14,924 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:29:14,924 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:29:14,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190
2024-12-08T04:29:14,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45955 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189
2024-12-08T04:29:14,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,925 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing c0239d354b1f92616e7e26cfd0ea5532 1/1 column families, dataSize=65 B heapSize=400 B
2024-12-08T04:29:14,925 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 6782a3f2d630839ebc670863784f60b1 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB
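Before taking file references, each SnapshotRegionCallable flushes the region's memstore (the "Flushing ... column families" lines above) so the snapshot only needs to point at on-disk HFiles. The same effect is available table-wide on the Admin API; a small sketch assuming an existing admin handle:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Forces memstore contents out to HFiles for every region of the table,
    // which is what the FLUSH snapshot does region by region before referencing files.
    static void flushTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
    }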
2024-12-08T04:29:14,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/.tmp/cf/dce2702640eb489a82b620d5702b7ccc is 71, key is 189b1e7a6922737c698387c5bac250a1/cf:q/1733632154708/Put/seqid=0
2024-12-08T04:29:14,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/.tmp/cf/c1b10700cb3f441e8606444718a202b3 is 69, key is 0349a7303550a61ccc8f6fefd1d9042b9/cf:q/1733632154705/Put/seqid=0
2024-12-08T04:29:14,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742245_1421 (size=8460)
2024-12-08T04:29:14,959 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742245_1421 (size=8460)
2024-12-08T04:29:14,959 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742245_1421 (size=8460)
2024-12-08T04:29:14,959 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/.tmp/cf/dce2702640eb489a82b620d5702b7ccc
2024-12-08T04:29:14,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/.tmp/cf/dce2702640eb489a82b620d5702b7ccc as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc
2024-12-08T04:29:14,970 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc, entries=49, sequenceid=6, filesize=8.3 K
2024-12-08T04:29:14,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742246_1422 (size=5149)
2024-12-08T04:29:14,971 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 6782a3f2d630839ebc670863784f60b1 in 46ms, sequenceid=6, compaction requested=false
2024-12-08T04:29:14,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742246_1422 (size=5149)
2024-12-08T04:29:14,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 6782a3f2d630839ebc670863784f60b1:
2024-12-08T04:29:14,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. for snaptb0-testExportWithChecksum completed.
2024-12-08T04:29:14,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.' region-info for snapshot=snaptb0-testExportWithChecksum
2024-12-08T04:29:14,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:14,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc] hfiles
2024-12-08T04:29:14,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc for snapshot=snaptb0-testExportWithChecksum
2024-12-08T04:29:14,973 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742246_1422 (size=5149)
2024-12-08T04:29:14,973 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/.tmp/cf/c1b10700cb3f441e8606444718a202b3
2024-12-08T04:29:14,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/.tmp/cf/c1b10700cb3f441e8606444718a202b3 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3
2024-12-08T04:29:14,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742247_1423 (size=107)
2024-12-08T04:29:14,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742247_1423 (size=107)
2024-12-08T04:29:14,980 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742247_1423 (size=107)
2024-12-08T04:29:14,981 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:29:14,981 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190
2024-12-08T04:29:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=190
2024-12-08T04:29:14,981 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,981 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:29:14,984 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3, entries=1, sequenceid=6, filesize=5.0 K
2024-12-08T04:29:14,985 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for c0239d354b1f92616e7e26cfd0ea5532 in 60ms, sequenceid=6, compaction requested=false
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for c0239d354b1f92616e7e26cfd0ea5532:
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. for snaptb0-testExportWithChecksum completed.
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.' region-info for snapshot=snaptb0-testExportWithChecksum
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3] hfiles
2024-12-08T04:29:14,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3 for snapshot=snaptb0-testExportWithChecksum
2024-12-08T04:29:14,986 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 6782a3f2d630839ebc670863784f60b1 in 210 msec
2024-12-08T04:29:14,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742248_1424 (size=107)
2024-12-08T04:29:14,991 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742248_1424 (size=107)
2024-12-08T04:29:14,992 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742248_1424 (size=107)
2024-12-08T04:29:14,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:29:14,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189
2024-12-08T04:29:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=189
2024-12-08T04:29:14,992 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,993 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:29:14,995 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=189, resume processing ppid=188
2024-12-08T04:29:14,995 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:29:14,995 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure c0239d354b1f92616e7e26cfd0ea5532 in 221 msec
2024-12-08T04:29:14,996 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:29:14,996 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:29:14,996 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum
2024-12-08T04:29:14,997 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-08T04:29:15,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742249_1425 (size=621)
2024-12-08T04:29:15,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742249_1425 (size=621)
2024-12-08T04:29:15,014 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742249_1425 (size=621)
2024-12-08T04:29:15,018 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:29:15,023 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:29:15,024 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum
2024-12-08T04:29:15,025 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:29:15,025 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188
2024-12-08T04:29:15,026 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 268 msec
2024-12-08T04:29:15,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188
2024-12-08T04:29:15,061 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed
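At this point snaptb0-testExportWithChecksum has been moved under .hbase-snapshot and the export test can start. A quick, illustrative way to confirm a snapshot is registered before exporting it (again assuming an admin handle):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    // Scans the master's list of completed snapshots, i.e. the directory the
    // manifest was moved into during the SNAPSHOT_COMPLETE_SNAPSHOT step above.
    static boolean snapshotExists(Admin admin, String name) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots()) {
            if (sd.getName().equals(name)) {
                return true;
            }
        }
        return false;
    }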
2024-12-08T04:29:15,061 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061
2024-12-08T04:29:15,061 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:15,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:15,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@66b9faa9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-08T04:29:15,109 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:29:15,113 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
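The "Verify the source snapshot" and "Copy Snapshot Manifest" lines are ExportSnapshot's setup phase, using the inputRoot/outputRoot values dumped just above; the tool then submits a MapReduce job that copies the referenced HFiles. A hedged sketch of driving the same tool programmatically; the target path is a placeholder rather than the Jenkins workspace path from the log, and the flag names are the tool's documented -snapshot/-copy-to options:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // ExportSnapshot is a Hadoop Tool: after copying the manifest it runs the
    // MapReduce copy job whose setup fills the rest of this log.
    static int exportToLocalFs(Configuration conf) throws Exception {
        return ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"   // placeholder target root
        });
    }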
2024-12-08T04:29:15,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:15,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:15,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:15,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-6874809571231212106.jar
2024-12-08T04:29:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-2751837890430069408.jar
2024-12-08T04:29:16,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:16,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:29:16,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:29:16,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:29:16,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:29:16,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:29:16,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:29:16,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:29:16,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:29:16,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:29:16,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:29:16,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:29:16,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:29:16,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:16,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:16,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:16,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:16,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:16,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:16,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
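The long run of "For class X, using jar Y" lines is TableMapReduceUtil working out, for every class the export job needs, which jar provides it so those jars can be shipped with the job. The call that typically triggers this resolution is the one-liner below (the Job object is assumed to be the export job being configured):

    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    // Adds the resolved jars (hbase-common, hbase-client, zookeeper, protobuf,
    // opentelemetry, hadoop-common, ... as listed above) to the job's distributed
    // cache so map tasks can load those classes.
    static void shipDependencies(Job job) throws IOException {
        TableMapReduceUtil.addDependencyJars(job);
    }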
2024-12-08T04:29:16,475 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742250_1426 (size=127628)
2024-12-08T04:29:16,475 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742250_1426 (size=127628)
2024-12-08T04:29:16,475 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742250_1426 (size=127628)
2024-12-08T04:29:16,493 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742251_1427 (size=2172101)
2024-12-08T04:29:16,493 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742251_1427 (size=2172101)
2024-12-08T04:29:16,494 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742251_1427 (size=2172101)
2024-12-08T04:29:16,501 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742252_1428 (size=213228)
2024-12-08T04:29:16,502 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742252_1428 (size=213228)
2024-12-08T04:29:16,502 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742252_1428 (size=213228)
2024-12-08T04:29:16,517 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742253_1429 (size=1877034)
2024-12-08T04:29:16,518 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742253_1429 (size=1877034)
2024-12-08T04:29:16,518 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742253_1429 (size=1877034)
2024-12-08T04:29:16,526 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742254_1430 (size=533455)
2024-12-08T04:29:16,526 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742254_1430 (size=533455)
2024-12-08T04:29:16,527 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742254_1430 (size=533455)
2024-12-08T04:29:16,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742255_1431 (size=7280644)
2024-12-08T04:29:16,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742255_1431 (size=7280644)
2024-12-08T04:29:16,553 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742255_1431 (size=7280644)
2024-12-08T04:29:16,572 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742256_1432 (size=4188619)
2024-12-08T04:29:16,572 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742256_1432 (size=4188619)
2024-12-08T04:29:16,572 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742256_1432 (size=4188619)
2024-12-08T04:29:16,581 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742257_1433 (size=20406)
2024-12-08T04:29:16,581 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742257_1433 (size=20406)
2024-12-08T04:29:16,582 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742257_1433 (size=20406)
2024-12-08T04:29:16,588 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742258_1434 (size=75495)
2024-12-08T04:29:16,588 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742258_1434 (size=75495)
2024-12-08T04:29:16,589 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742258_1434 (size=75495)
2024-12-08T04:29:16,595 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742259_1435 (size=45609)
2024-12-08T04:29:16,595 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742259_1435 (size=45609)
2024-12-08T04:29:16,596 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742259_1435 (size=45609)
2024-12-08T04:29:16,602 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742260_1436 (size=110084)
2024-12-08T04:29:16,602 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742260_1436 (size=110084)
2024-12-08T04:29:16,603 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742260_1436 (size=110084)
2024-12-08T04:29:16,613 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742261_1437 (size=1323991)
2024-12-08T04:29:16,613 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742261_1437 (size=1323991)
2024-12-08T04:29:16,614 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742261_1437 (size=1323991)
2024-12-08T04:29:16,620 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742262_1438 (size=23076)
2024-12-08T04:29:16,620 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742262_1438 (size=23076)
2024-12-08T04:29:16,620 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742262_1438 (size=23076)
2024-12-08T04:29:16,627 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742263_1439 (size=126803)
2024-12-08T04:29:16,627 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742263_1439 (size=126803)
2024-12-08T04:29:16,627 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742263_1439 (size=126803)
2024-12-08T04:29:16,634 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742264_1440 (size=322274)
2024-12-08T04:29:16,634 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742264_1440 (size=322274)
2024-12-08T04:29:16,635 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742264_1440 (size=322274)
2024-12-08T04:29:16,645 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742265_1441 (size=1832290)
2024-12-08T04:29:16,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742265_1441 (size=1832290)
2024-12-08T04:29:16,646 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742265_1441 (size=1832290)
2024-12-08T04:29:16,654 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742266_1442 (size=451756)
2024-12-08T04:29:16,654 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742266_1442 (size=451756)
2024-12-08T04:29:16,654 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742266_1442 (size=451756)
2024-12-08T04:29:16,661 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742267_1443 (size=30081)
2024-12-08T04:29:16,661 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742267_1443 (size=30081)
2024-12-08T04:29:16,661 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742267_1443 (size=30081)
2024-12-08T04:29:16,668 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742268_1444 (size=53616)
2024-12-08T04:29:16,668 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742268_1444 (size=53616)
2024-12-08T04:29:16,668 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742268_1444 (size=53616)
2024-12-08T04:29:16,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742269_1445 (size=29229)
2024-12-08T04:29:16,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742269_1445 (size=29229)
2024-12-08T04:29:16,675 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742269_1445 (size=29229)
2024-12-08T04:29:16,682 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742270_1446 (size=169089)
2024-12-08T04:29:16,682 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742270_1446 (size=169089)
2024-12-08T04:29:16,682 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742270_1446 (size=169089)
2024-12-08T04:29:16,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742271_1447 (size=6350155)
2024-12-08T04:29:16,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742271_1447 (size=6350155)
2024-12-08T04:29:16,704 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742271_1447 (size=6350155)
2024-12-08T04:29:16,726 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742272_1448 (size=5175431)
2024-12-08T04:29:16,727 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742272_1448 (size=5175431)
2024-12-08T04:29:16,727 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742272_1448 (size=5175431)
2024-12-08T04:29:16,748 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742273_1449 (size=136454)
2024-12-08T04:29:16,748 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742273_1449 (size=136454)
2024-12-08T04:29:16,749 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742273_1449 (size=136454)
2024-12-08T04:29:16,774 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742274_1450 (size=907852)
2024-12-08T04:29:16,775 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742274_1450 (size=907852)
2024-12-08T04:29:16,776 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742274_1450 (size=907852)
2024-12-08T04:29:16,801 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742275_1451 (size=3317408)
2024-12-08T04:29:16,802 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742275_1451 (size=3317408)
2024-12-08T04:29:16,802 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742275_1451 (size=3317408)
2024-12-08T04:29:16,810 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742276_1452 (size=503880)
2024-12-08T04:29:16,811 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742276_1452 (size=503880)
2024-12-08T04:29:16,811 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742276_1452 (size=503880)
2024-12-08T04:29:16,833 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742277_1453 (size=4695811)
2024-12-08T04:29:16,834 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742277_1453 (size=4695811)
2024-12-08T04:29:16,834 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742277_1453 (size=4695811)
2024-12-08T04:29:16,835 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:29:16,837 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list
2024-12-08T04:29:16,839 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:29:16,845 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742278_1454 (size=338)
2024-12-08T04:29:16,846 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742278_1454 (size=338)
2024-12-08T04:29:16,846 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742278_1454 (size=338)
2024-12-08T04:29:16,851 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742279_1455 (size=15)
2024-12-08T04:29:16,851 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742279_1455 (size=15)
2024-12-08T04:29:16,852 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742279_1455 (size=15)
2024-12-08T04:29:16,865 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742280_1456 (size=304929)
2024-12-08T04:29:16,866 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742280_1456 (size=304929)
2024-12-08T04:29:16,866 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742280_1456 (size=304929)
2024-12-08T04:29:17,334 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:29:17,334 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:29:17,337 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0007_000001 (auth:SIMPLE) from 127.0.0.1:53188
2024-12-08T04:29:17,354 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0007/container_1733631992429_0007_01_000001/launch_container.sh]
2024-12-08T04:29:17,354 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0007/container_1733631992429_0007_01_000001/container_tokens]
2024-12-08T04:29:17,354 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_3/usercache/jenkins/appcache/application_1733631992429_0007/container_1733631992429_0007_01_000001/sysfs]
2024-12-08T04:29:17,776 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:51544
2024-12-08T04:29:18,980 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:29:22,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T04:29:23,166 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:46654
2024-12-08T04:29:23,426 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742281_1457 (size=350603)
2024-12-08T04:29:23,426 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742281_1457 (size=350603)
2024-12-08T04:29:23,426 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742281_1457 (size=350603)
2024-12-08T04:29:23,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum
2024-12-08T04:29:23,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer
2024-12-08T04:29:25,423 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:35422
2024-12-08T04:29:26,692 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region b0618e5cec1ba295985f16f1dd465d87 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:29:26,692 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c0239d354b1f92616e7e26cfd0ea5532 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:29:26,692 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 430b6d8c8c366152be49a2e6dcaf8f87 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:29:26,692 DEBUG [master/428ded7e54d6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 6782a3f2d630839ebc670863784f60b1 changed from -1.0 to 0.0, refreshing cache
2024-12-08T04:29:30,484 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000002/launch_container.sh]
2024-12-08T04:29:30,484 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000002/container_tokens]
2024-12-08T04:29:30,484 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000002/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061/archive/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc. Input and output filesystems are of different types.
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different.
 Or you can skip checksum-checks altogether with -no-checksum-verify; for the table backup scenario, you should use the -i option to skip checksum-checks.
 (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)

	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
	at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
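
The error text above names two workarounds: file-level checksum validation via dfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify (and the -i option in the table backup scenario). What follows is a minimal, hypothetical Java sketch of re-driving ExportSnapshot with those options; only the snapshot name and the option names are taken from the log, while the local destination path and the class name are illustrative assumptions, and the HBase client/mapreduce jars are assumed to be on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumWorkaround {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level (composite CRC) checksum comparison, as suggested by the error
    // message, for copies between filesystems with different block checksums.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export-dir"   // hypothetical local target
        // alternatively, pass "-no-checksum-verify" to skip verification entirely
    });
    System.exit(rc);
  }
}

Whichever route is taken, weakening or skipping checksum verification trades away protection against corruption during the copy, as the NOTE in the error message points out.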

2024-12-08T04:29:32,275 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:35436
2024-12-08T04:29:34,812 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region b0618e5cec1ba295985f16f1dd465d87, had cached 0 bytes from a total of 5356
2024-12-08T04:29:34,812 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 430b6d8c8c366152be49a2e6dcaf8f87, had cached 0 bytes from a total of 8258
2024-12-08T04:29:35,956 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000003/launch_container.sh]
2024-12-08T04:29:35,956 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000003/container_tokens]
2024-12-08T04:29:35,956 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000003/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061/archive/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc. Input and output filesystems are of different types.
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different.
 Or you can skip checksum-checks altogether with -no-checksum-verify; for the table backup scenario, you should use the -i option to skip checksum-checks.
 (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)

	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
	at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)

2024-12-08T04:29:37,290 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:56030
2024-12-08T04:29:40,905 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000004/launch_container.sh]
2024-12-08T04:29:40,905 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000004/container_tokens]
2024-12-08T04:29:40,905 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_2/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000004/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/local-export-1733632155061/archive/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc. Input and output filesystems are of different types.
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different.
 Or you can skip checksum-checks altogether with -no-checksum-verify; for the table backup scenario, you should use the -i option to skip checksum-checks.
 (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)

	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
	at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
	at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)

2024-12-08T04:29:42,306 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:56038
2024-12-08T04:29:45,468 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742282_1458 (size=21340)
2024-12-08T04:29:45,468 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742282_1458 (size=21340)
2024-12-08T04:29:45,470 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742282_1458 (size=21340)
2024-12-08T04:29:45,538 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742283_1459 (size=460)
2024-12-08T04:29:45,539 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742283_1459 (size=460)
2024-12-08T04:29:45,540 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742283_1459 (size=460)
2024-12-08T04:29:45,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742284_1460 (size=21340)
2024-12-08T04:29:45,597 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742284_1460 (size=21340)
2024-12-08T04:29:45,598 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742284_1460 (size=21340)
2024-12-08T04:29:45,628 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742285_1461 (size=350603)
2024-12-08T04:29:45,628 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742285_1461 (size=350603)
2024-12-08T04:29:45,629 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742285_1461 (size=350603)
2024-12-08T04:29:45,647 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:60246
2024-12-08T04:29:47,115 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733631992429_0008_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces:0

	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?]
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?]
	at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:29:47,116 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116
2024-12-08T04:29:47,116 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:47,145 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:29:47,145 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-08T04:29:47,148 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:29:47,151 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-08T04:29:47,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742287_1463 (size=621)
2024-12-08T04:29:47,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742286_1462 (size=156)
2024-12-08T04:29:47,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742286_1462 (size=156)
2024-12-08T04:29:47,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742287_1463 (size=621)
2024-12-08T04:29:47,160 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742286_1462 (size=156)
2024-12-08T04:29:47,161 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742287_1463 (size=621)
2024-12-08T04:29:47,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:47,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:47,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:47,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-18006095626276013878.jar
2024-12-08T04:29:48,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-14906298463821194161.jar
2024-12-08T04:29:48,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:29:48,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:29:48,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:29:48,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:29:48,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:29:48,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:29:48,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:29:48,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:29:48,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:48,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:48,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:48,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:48,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:29:48,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:48,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:29:48,288 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742288_1464 (size=127628)
2024-12-08T04:29:48,288 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742288_1464 (size=127628)
2024-12-08T04:29:48,289 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742288_1464 (size=127628)
2024-12-08T04:29:48,300 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742289_1465 (size=2172101)
2024-12-08T04:29:48,301 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742289_1465 (size=2172101)
2024-12-08T04:29:48,301 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742289_1465 (size=2172101)
2024-12-08T04:29:48,307 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742290_1466 (size=213228)
2024-12-08T04:29:48,307 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742290_1466 (size=213228)
2024-12-08T04:29:48,308 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742290_1466 (size=213228)
2024-12-08T04:29:48,319 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742291_1467 (size=1877034)
2024-12-08T04:29:48,320 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742291_1467 (size=1877034)
2024-12-08T04:29:48,320 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742291_1467 (size=1877034)
2024-12-08T04:29:48,327 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742292_1468 (size=533455)
2024-12-08T04:29:48,327 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742292_1468 (size=533455)
2024-12-08T04:29:48,328 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742292_1468 (size=533455)
2024-12-08T04:29:48,348 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742293_1469 (size=7280644)
2024-12-08T04:29:48,348 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742293_1469 (size=7280644)
2024-12-08T04:29:48,349 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742293_1469 (size=7280644)
2024-12-08T04:29:48,365 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742294_1470 (size=4188619)
2024-12-08T04:29:48,365 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742294_1470 (size=4188619)
2024-12-08T04:29:48,366 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742294_1470 (size=4188619)
2024-12-08T04:29:48,372 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742295_1471 (size=20406)
2024-12-08T04:29:48,372 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742295_1471 (size=20406)
2024-12-08T04:29:48,372 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742295_1471 (size=20406)
2024-12-08T04:29:48,378 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742296_1472 (size=75495)
2024-12-08T04:29:48,378 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742296_1472 (size=75495)
2024-12-08T04:29:48,378 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742296_1472 (size=75495)
2024-12-08T04:29:48,384 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742297_1473 (size=45609)
2024-12-08T04:29:48,384 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742297_1473 (size=45609)
2024-12-08T04:29:48,385 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742297_1473 (size=45609)
2024-12-08T04:29:48,391 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742298_1474 (size=110084)
2024-12-08T04:29:48,391 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742298_1474 (size=110084)
2024-12-08T04:29:48,391 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742298_1474 (size=110084)
2024-12-08T04:29:48,400 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742299_1475 (size=1323991)
2024-12-08T04:29:48,401 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742299_1475 (size=1323991)
2024-12-08T04:29:48,401 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742299_1475 (size=1323991)
2024-12-08T04:29:48,410 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742300_1476 (size=23076)
2024-12-08T04:29:48,410 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742300_1476 (size=23076)
2024-12-08T04:29:48,410 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742300_1476 (size=23076)
2024-12-08T04:29:48,416 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742301_1477 (size=126803)
2024-12-08T04:29:48,416 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742301_1477 (size=126803)
2024-12-08T04:29:48,417 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742301_1477 (size=126803)
2024-12-08T04:29:48,426 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742302_1478 (size=322274)
2024-12-08T04:29:48,426 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742302_1478 (size=322274)
2024-12-08T04:29:48,427 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742302_1478 (size=322274)
2024-12-08T04:29:48,437 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742303_1479 (size=1832290)
2024-12-08T04:29:48,437 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742303_1479 (size=1832290)
2024-12-08T04:29:48,437 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742303_1479 (size=1832290)
2024-12-08T04:29:48,464 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742304_1480 (size=6350155)
2024-12-08T04:29:48,464 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742304_1480 (size=6350155)
2024-12-08T04:29:48,464 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742304_1480 (size=6350155)
2024-12-08T04:29:48,471 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742305_1481 (size=30081)
2024-12-08T04:29:48,471 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742305_1481 (size=30081)
2024-12-08T04:29:48,472 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742305_1481 (size=30081)
2024-12-08T04:29:48,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742306_1482 (size=53616)
2024-12-08T04:29:48,478 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742306_1482 (size=53616)
2024-12-08T04:29:48,479 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742306_1482 (size=53616)
2024-12-08T04:29:48,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742307_1483 (size=451756)
2024-12-08T04:29:48,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742307_1483 (size=451756)
2024-12-08T04:29:48,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742307_1483 (size=451756)
2024-12-08T04:29:48,492 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742308_1484 (size=29229)
2024-12-08T04:29:48,493 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742308_1484 (size=29229)
2024-12-08T04:29:48,493 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742308_1484 (size=29229)
2024-12-08T04:29:48,499 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742309_1485 (size=169089)
2024-12-08T04:29:48,500 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742309_1485 (size=169089)
2024-12-08T04:29:48,500 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742309_1485 (size=169089)
2024-12-08T04:29:48,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742310_1486 (size=5175431)
2024-12-08T04:29:48,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742310_1486 (size=5175431)
2024-12-08T04:29:48,523 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742310_1486 (size=5175431)
2024-12-08T04:29:48,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742311_1487 (size=136454)
2024-12-08T04:29:48,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742311_1487 (size=136454)
2024-12-08T04:29:48,531 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742311_1487 (size=136454)
2024-12-08T04:29:48,556 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742312_1488 (size=907852)
2024-12-08T04:29:48,556 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742312_1488 (size=907852)
2024-12-08T04:29:48,556 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742312_1488 (size=907852)
2024-12-08T04:29:48,572 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742313_1489 (size=3317408)
2024-12-08T04:29:48,573 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742313_1489 (size=3317408)
2024-12-08T04:29:48,573 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742313_1489 (size=3317408)
2024-12-08T04:29:48,582 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742314_1490 (size=503880)
2024-12-08T04:29:48,582 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742314_1490 (size=503880)
2024-12-08T04:29:48,582 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742314_1490 (size=503880)
2024-12-08T04:29:48,604 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742315_1491 (size=4695811)
2024-12-08T04:29:48,604 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742315_1491 (size=4695811)
2024-12-08T04:29:48,604 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742315_1491 (size=4695811)
2024-12-08T04:29:48,605 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:29:48,607 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list
2024-12-08T04:29:48,609 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:29:48,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742316_1492 (size=338)
2024-12-08T04:29:48,616 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742316_1492 (size=338)
2024-12-08T04:29:48,616 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742316_1492 (size=338)
2024-12-08T04:29:48,622 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742317_1493 (size=15)
2024-12-08T04:29:48,622 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742317_1493 (size=15)
2024-12-08T04:29:48,622 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742317_1493 (size=15)
2024-12-08T04:29:48,636 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742318_1494 (size=304883)
2024-12-08T04:29:48,636 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742318_1494 (size=304883)
2024-12-08T04:29:48,637 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742318_1494 (size=304883)
2024-12-08T04:29:50,697 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000005/launch_container.sh]
2024-12-08T04:29:50,697 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000005/container_tokens]
2024-12-08T04:29:50,697 WARN  [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000005/sysfs]
2024-12-08T04:29:51,724 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:29:51,724 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user; it is likely set too low. Skipping enforcement to allow at least one application to start
2024-12-08T04:29:51,729 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0008_000001 (auth:SIMPLE) from 127.0.0.1:60252
2024-12-08T04:29:51,739 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000001/launch_container.sh]
2024-12-08T04:29:51,739 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000001/container_tokens]
2024-12-08T04:29:51,739 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_3/usercache/jenkins/appcache/application_1733631992429_0008/container_1733631992429_0008_01_000001/sysfs]
2024-12-08T04:29:51,954 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0009_000001 (auth:SIMPLE) from 127.0.0.1:45496
2024-12-08T04:29:52,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:29:57,017 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0009_000001 (auth:SIMPLE) from 127.0.0.1:52002
2024-12-08T04:29:57,281 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742319_1495 (size=350557)
2024-12-08T04:29:57,282 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742319_1495 (size=350557)
2024-12-08T04:29:57,282 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742319_1495 (size=350557)
2024-12-08T04:29:59,131 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6782a3f2d630839ebc670863784f60b1, had cached 0 bytes from a total of 8460
2024-12-08T04:29:59,132 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region c0239d354b1f92616e7e26cfd0ea5532, had cached 0 bytes from a total of 5149
2024-12-08T04:29:59,260 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0009_000001 (auth:SIMPLE) from 127.0.0.1:56290
2024-12-08T04:30:04,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742320_1496 (size=8460)
2024-12-08T04:30:04,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742320_1496 (size=8460)
2024-12-08T04:30:04,127 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742320_1496 (size=8460)
2024-12-08T04:30:04,256 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742321_1497 (size=5149)
2024-12-08T04:30:04,262 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742321_1497 (size=5149)
2024-12-08T04:30:04,262 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742321_1497 (size=5149)
2024-12-08T04:30:04,447 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742322_1498 (size=17413)
2024-12-08T04:30:04,448 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742322_1498 (size=17413)
2024-12-08T04:30:04,448 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742322_1498 (size=17413)
2024-12-08T04:30:04,485 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742323_1499 (size=462)
2024-12-08T04:30:04,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742323_1499 (size=462)
2024-12-08T04:30:04,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742323_1499 (size=462)
2024-12-08T04:30:04,495 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000002/launch_container.sh]
2024-12-08T04:30:04,496 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000002/container_tokens]
2024-12-08T04:30:04,496 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000002/sysfs]
2024-12-08T04:30:04,555 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742324_1500 (size=17413)
2024-12-08T04:30:04,555 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742324_1500 (size=17413)
2024-12-08T04:30:04,556 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742324_1500 (size=17413)
2024-12-08T04:30:04,614 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742325_1501 (size=350557)
2024-12-08T04:30:04,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742325_1501 (size=350557)
2024-12-08T04:30:04,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742325_1501 (size=350557)
2024-12-08T04:30:04,636 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0009_000001 (auth:SIMPLE) from 127.0.0.1:55460
2024-12-08T04:30:05,828 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:30:05,830 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:30:05,837 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum
2024-12-08T04:30:05,837 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:30:05,838 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:30:05,838 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum
2024-12-08T04:30:05,838 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo
2024-12-08T04:30:05,838 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest
2024-12-08T04:30:05,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/snaptb0-testExportWithChecksum
2024-12-08T04:30:05,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo
2024-12-08T04:30:05,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632187116/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest
2024-12-08T04:30:05,846 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum
2024-12-08T04:30:05,846 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum
2024-12-08T04:30:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191
2024-12-08T04:30:05,849 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632205849"}]},"ts":"1733632205849"}
2024-12-08T04:30:05,851 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta
2024-12-08T04:30:05,853 INFO  [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING
2024-12-08T04:30:05,854 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}]
2024-12-08T04:30:05,855 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, UNASSIGN}]
2024-12-08T04:30:05,856 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, UNASSIGN
2024-12-08T04:30:05,856 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, UNASSIGN
2024-12-08T04:30:05,857 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=6782a3f2d630839ebc670863784f60b1, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:30:05,857 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=c0239d354b1f92616e7e26cfd0ea5532, regionState=CLOSING, regionLocation=428ded7e54d6,45955,1733631983994
2024-12-08T04:30:05,858 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:30:05,858 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE; CloseRegionProcedure 6782a3f2d630839ebc670863784f60b1, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:30:05,859 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:30:05,859 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE; CloseRegionProcedure c0239d354b1f92616e7e26cfd0ea5532, server=428ded7e54d6,45955,1733631983994}]
2024-12-08T04:30:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191
2024-12-08T04:30:06,010 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:30:06,011 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:30:06,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,45955,1733631983994
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing 6782a3f2d630839ebc670863784f60b1, disabling compactions & flushes
2024-12-08T04:30:06,011 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1. after waiting 0 ms
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:30:06,011 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing c0239d354b1f92616e7e26cfd0ea5532, disabling compactions & flushes
2024-12-08T04:30:06,011 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:30:06,011 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:30:06,012 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532. after waiting 0 ms
2024-12-08T04:30:06,012 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:30:06,026 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.
2024-12-08T04:30:06,026 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.
2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for c0239d354b1f92616e7e26cfd0ea5532:

2024-12-08T04:30:06,026 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for 6782a3f2d630839ebc670863784f60b1:

2024-12-08T04:30:06,028 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed 6782a3f2d630839ebc670863784f60b1
2024-12-08T04:30:06,028 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=6782a3f2d630839ebc670863784f60b1, regionState=CLOSED
2024-12-08T04:30:06,028 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:30:06,029 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=c0239d354b1f92616e7e26cfd0ea5532, regionState=CLOSED
2024-12-08T04:30:06,031 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194
2024-12-08T04:30:06,032 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=193
2024-12-08T04:30:06,032 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=193, state=SUCCESS; CloseRegionProcedure c0239d354b1f92616e7e26cfd0ea5532, server=428ded7e54d6,45955,1733631983994 in 171 msec
2024-12-08T04:30:06,032 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; CloseRegionProcedure 6782a3f2d630839ebc670863784f60b1, server=428ded7e54d6,46421,1733631984115 in 172 msec
2024-12-08T04:30:06,032 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6782a3f2d630839ebc670863784f60b1, UNASSIGN in 176 msec
2024-12-08T04:30:06,033 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192
2024-12-08T04:30:06,033 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=c0239d354b1f92616e7e26cfd0ea5532, UNASSIGN in 177 msec
2024-12-08T04:30:06,035 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191
2024-12-08T04:30:06,035 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 180 msec
2024-12-08T04:30:06,036 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632206036"}]},"ts":"1733632206036"}
2024-12-08T04:30:06,037 INFO  [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta
2024-12-08T04:30:06,039 INFO  [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED
2024-12-08T04:30:06,040 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 193 msec
2024-12-08T04:30:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191
2024-12-08T04:30:06,151 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed
2024-12-08T04:30:06,151 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum
2024-12-08T04:30:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,154 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum
2024-12-08T04:30:06,154 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,157 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum
2024-12-08T04:30:06,157 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:30:06,158 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1
2024-12-08T04:30:06,159 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/recovered.edits]
2024-12-08T04:30:06,159 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/recovered.edits]
2024-12-08T04:30:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,162 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null
2024-12-08T04:30:06,163 INFO  [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-08T04:30:06,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF
2024-12-08T04:30:06,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF
2024-12-08T04:30:06,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum
2024-12-08T04:30:06,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null
2024-12-08T04:30:06,165 INFO  [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-08T04:30:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197
2024-12-08T04:30:06,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,170 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/cf/c1b10700cb3f441e8606444718a202b3
2024-12-08T04:30:06,170 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/cf/dce2702640eb489a82b620d5702b7ccc
2024-12-08T04:30:06,173 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1/recovered.edits/9.seqid
2024-12-08T04:30:06,173 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532/recovered.edits/9.seqid
2024-12-08T04:30:06,174 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/6782a3f2d630839ebc670863784f60b1
2024-12-08T04:30:06,174 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportWithChecksum/c0239d354b1f92616e7e26cfd0ea5532
2024-12-08T04:30:06,174 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions
2024-12-08T04:30:06,176 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,184 WARN  [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta
2024-12-08T04:30:06,187 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor.
2024-12-08T04:30:06,188 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,188 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states.
2024-12-08T04:30:06,188 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632206188"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:06,188 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632206188"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:06,190 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:30:06,190 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c0239d354b1f92616e7e26cfd0ea5532, NAME => 'testtb-testExportWithChecksum,,1733632153748.c0239d354b1f92616e7e26cfd0ea5532.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6782a3f2d630839ebc670863784f60b1, NAME => 'testtb-testExportWithChecksum,1,1733632153748.6782a3f2d630839ebc670863784f60b1.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:30:06,190 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted.
2024-12-08T04:30:06,190 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632206190"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:06,192 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META
2024-12-08T04:30:06,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum
2024-12-08T04:30:06,195 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 42 msec
2024-12-08T04:30:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197
2024-12-08T04:30:06,267 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed
2024-12-08T04:30:06,273 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum"

2024-12-08T04:30:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum
2024-12-08T04:30:06,276 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum"

2024-12-08T04:30:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum
2024-12-08T04:30:06,302 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=802 (was 808), OpenFileDescriptor=810 (was 821), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=431 (was 537), ProcessCount=17 (was 17), AvailableMemoryMB=2757 (was 3290)
2024-12-08T04:30:06,302 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500
2024-12-08T04:30:06,322 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=802, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=431, ProcessCount=17, AvailableMemoryMB=2756
2024-12-08T04:30:06,322 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500
2024-12-08T04:30:06,323 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-08T04:30:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:06,325 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION
2024-12-08T04:30:06,325 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:30:06,325 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default"
qualifier: "testtb-testExportFileSystemStateWithSkipTmp"
 procId is: 198
2024-12-08T04:30:06,326 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-08T04:30:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198
2024-12-08T04:30:06,332 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742326_1502 (size=418)
2024-12-08T04:30:06,332 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742326_1502 (size=418)
2024-12-08T04:30:06,332 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742326_1502 (size=418)
2024-12-08T04:30:06,334 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 976b979028e460504e292af924e9f145, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:30:06,334 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6a4f358fa009e05565077924c50ddc69, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:30:06,340 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742327_1503 (size=79)
2024-12-08T04:30:06,341 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742328_1504 (size=79)
2024-12-08T04:30:06,341 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742327_1503 (size=79)
2024-12-08T04:30:06,341 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742327_1503 (size=79)
2024-12-08T04:30:06,342 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742328_1504 (size=79)
2024-12-08T04:30:06,342 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742328_1504 (size=79)
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 976b979028e460504e292af924e9f145, disabling compactions & flushes
2024-12-08T04:30:06,342 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. after waiting 0 ms
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,342 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 6a4f358fa009e05565077924c50ddc69, disabling compactions & flushes
2024-12-08T04:30:06,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 976b979028e460504e292af924e9f145:

2024-12-08T04:30:06,343 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. after waiting 0 ms
2024-12-08T04:30:06,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,343 INFO  [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6a4f358fa009e05565077924c50ddc69:

2024-12-08T04:30:06,343 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META
2024-12-08T04:30:06,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733632206344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632206344"}]},"ts":"1733632206344"}
2024-12-08T04:30:06,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733632206344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733632206344"}]},"ts":"1733632206344"}
2024-12-08T04:30:06,346 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta.
2024-12-08T04:30:06,346 INFO  [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-08T04:30:06,346 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632206346"}]},"ts":"1733632206346"}
2024-12-08T04:30:06,347 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta
2024-12-08T04:30:06,350 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {428ded7e54d6=0} racks are {/default-rack=0}
2024-12-08T04:30:06,351 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-12-08T04:30:06,351 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-12-08T04:30:06,351 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-12-08T04:30:06,351 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-12-08T04:30:06,351 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-12-08T04:30:06,351 INFO  [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-12-08T04:30:06,351 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-12-08T04:30:06,352 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, ASSIGN}]
2024-12-08T04:30:06,352 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, ASSIGN
2024-12-08T04:30:06,352 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, ASSIGN
2024-12-08T04:30:06,353 INFO  [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, ASSIGN; state=OFFLINE, location=428ded7e54d6,46421,1733631984115; forceNewPlan=false, retain=false
2024-12-08T04:30:06,353 INFO  [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, ASSIGN; state=OFFLINE, location=428ded7e54d6,41743,1733631984189; forceNewPlan=false, retain=false
2024-12-08T04:30:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198
2024-12-08T04:30:06,503 INFO  [428ded7e54d6:46337 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-08T04:30:06,504 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=6a4f358fa009e05565077924c50ddc69, regionState=OPENING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:30:06,504 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=976b979028e460504e292af924e9f145, regionState=OPENING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:30:06,505 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure 6a4f358fa009e05565077924c50ddc69, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:30:06,506 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure 976b979028e460504e292af924e9f145, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:30:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198
2024-12-08T04:30:06,657 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:30:06,658 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:30:06,659 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,660 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 6a4f358fa009e05565077924c50ddc69, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.', STARTKEY => '1', ENDKEY => ''}
2024-12-08T04:30:06,660 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. service=AccessControlService
2024-12-08T04:30:06,660 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:30:06,660 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,660 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:30:06,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,661 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 976b979028e460504e292af924e9f145, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.', STARTKEY => '', ENDKEY => '1'}
2024-12-08T04:30:06,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. service=AccessControlService
2024-12-08T04:30:06,661 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-08T04:30:06,661 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 976b979028e460504e292af924e9f145
2024-12-08T04:30:06,662 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-08T04:30:06,662 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 976b979028e460504e292af924e9f145
2024-12-08T04:30:06,662 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 976b979028e460504e292af924e9f145
2024-12-08T04:30:06,662 INFO  [StoreOpener-6a4f358fa009e05565077924c50ddc69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6a4f358fa009e05565077924c50ddc69 
2024-12-08T04:30:06,663 INFO  [StoreOpener-976b979028e460504e292af924e9f145-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 976b979028e460504e292af924e9f145 
2024-12-08T04:30:06,663 INFO  [StoreOpener-6a4f358fa009e05565077924c50ddc69-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a4f358fa009e05565077924c50ddc69 columnFamilyName cf
2024-12-08T04:30:06,663 DEBUG [StoreOpener-6a4f358fa009e05565077924c50ddc69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:30:06,664 INFO  [StoreOpener-6a4f358fa009e05565077924c50ddc69-1 {}] regionserver.HStore(327): Store=6a4f358fa009e05565077924c50ddc69/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:30:06,664 INFO  [StoreOpener-976b979028e460504e292af924e9f145-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 976b979028e460504e292af924e9f145 columnFamilyName cf
2024-12-08T04:30:06,664 DEBUG [StoreOpener-976b979028e460504e292af924e9f145-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T04:30:06,664 INFO  [StoreOpener-976b979028e460504e292af924e9f145-1 {}] regionserver.HStore(327): Store=976b979028e460504e292af924e9f145/cf,  memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-08T04:30:06,665 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,665 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145
2024-12-08T04:30:06,665 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,665 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145
2024-12-08T04:30:06,667 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for 976b979028e460504e292af924e9f145
2024-12-08T04:30:06,667 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,669 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:30:06,669 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-08T04:30:06,671 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 6a4f358fa009e05565077924c50ddc69; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66729042, jitterRate=-0.0056597888469696045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:30:06,671 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 976b979028e460504e292af924e9f145; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62163091, jitterRate=-0.0736977607011795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-08T04:30:06,672 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 6a4f358fa009e05565077924c50ddc69:

2024-12-08T04:30:06,672 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 976b979028e460504e292af924e9f145:

2024-12-08T04:30:06,673 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145., pid=202, masterSystemTime=1733632206658
2024-12-08T04:30:06,673 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69., pid=201, masterSystemTime=1733632206656
2024-12-08T04:30:06,674 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,674 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:06,675 INFO  [PEWorker-1 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=6a4f358fa009e05565077924c50ddc69, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:30:06,675 DEBUG [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,675 INFO  [RS_OPEN_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:06,675 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=976b979028e460504e292af924e9f145, regionState=OPEN, openSeqNum=2, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:30:06,678 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200
2024-12-08T04:30:06,678 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure 6a4f358fa009e05565077924c50ddc69, server=428ded7e54d6,46421,1733631984115 in 171 msec
2024-12-08T04:30:06,679 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199
2024-12-08T04:30:06,679 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure 976b979028e460504e292af924e9f145, server=428ded7e54d6,41743,1733631984189 in 171 msec
2024-12-08T04:30:06,679 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, ASSIGN in 327 msec
2024-12-08T04:30:06,680 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198
2024-12-08T04:30:06,680 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, ASSIGN in 328 msec
2024-12-08T04:30:06,680 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-08T04:30:06,681 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632206681"}]},"ts":"1733632206681"}
2024-12-08T04:30:06,681 INFO  [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta
2024-12-08T04:30:06,727 INFO  [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION
2024-12-08T04:30:06,727 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA
2024-12-08T04:30:06,729 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]
2024-12-08T04:30:06,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,766 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,767 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,767 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:06,767 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 442 msec
2024-12-08T04:30:06,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198
2024-12-08T04:30:06,929 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed
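The CREATE operation that completes above (procId 198: two regions pre-split at row key '1', a single column family 'cf', and an ACL entry for user jenkins) is the server-side trace of an ordinary Admin createTable call. The sketch below shows roughly what such a call looks like on the client; the class name, configuration handling, and use of a main method are illustrative assumptions and are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSkipTmpTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // assumes hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Single column family 'cf', as seen in the StoreOpener lines above.
      TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // Pre-split at row key '1' so two regions come up: ('' -> '1') and ('1' -> ''),
      // matching regions 976b979028e460504e292af924e9f145 and 6a4f358fa009e05565077924c50ddc69.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc.build(), splitKeys);
    }
  }
}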
2024-12-08T04:30:06,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms
2024-12-08T04:30:06,929 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:30:06,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info
2024-12-08T04:30:06,936 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states.
2024-12-08T04:30:06,936 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-08T04:30:06,936 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned.
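The HBaseTestingUtility lines above come from the test harness waiting for region assignment to settle before proceeding. In test code this is typically a single call like the sketch below; the wrapper class and the 'util' parameter are assumptions for illustration, while the table name and the 60 s timeout appear in the log.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignmentSketch {
  // Assumes 'util' is the HBaseTestingUtility instance driving this mini-cluster.
  static void waitForTable(HBaseTestingUtility util) throws java.io.IOException {
    // Blocks until every region of the table is assigned, or the utility's timeout elapses.
    util.waitUntilAllRegionsAssigned(
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
  }
}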
2024-12-08T04:30:06,938 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }
2024-12-08T04:30:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632206939 (current time:1733632206939).
2024-12-08T04:30:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:30:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2
2024-12-08T04:30:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:30:06,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e492f85 to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51a2c727
2024-12-08T04:30:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@223cf4a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:30:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:30:06,947 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:30:06,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e492f85 to 127.0.0.1:55878
2024-12-08T04:30:06,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:30:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31b7da6a to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@766ba884
2024-12-08T04:30:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b7c306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:30:06,964 DEBUG [hconnection-0x7a0a55a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:30:06,965 INFO  [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:30:06,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31b7da6a to 127.0.0.1:55878
2024-12-08T04:30:06,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:30:06,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]
2024-12-08T04:30:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:30:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }
2024-12-08T04:30:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203
2024-12-08T04:30:06,970 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:30:06,971 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:30:06,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203
2024-12-08T04:30:06,978 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:30:06,994 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742329_1505 (size=203)
2024-12-08T04:30:06,994 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742329_1505 (size=203)
2024-12-08T04:30:06,995 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742329_1505 (size=203)
2024-12-08T04:30:06,996 INFO  [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:30:06,996 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69}]
2024-12-08T04:30:06,997 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:06,997 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145
2024-12-08T04:30:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203
2024-12-08T04:30:07,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:30:07,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:30:07,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205
2024-12-08T04:30:07,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204
2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 6a4f358fa009e05565077924c50ddc69:

2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 976b979028e460504e292af924e9f145:

2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed.
2024-12-08T04:30:07,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed.
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:30:07,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles
2024-12-08T04:30:07,165 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742330_1506 (size=82)
2024-12-08T04:30:07,165 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742330_1506 (size=82)
2024-12-08T04:30:07,165 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742330_1506 (size=82)
2024-12-08T04:30:07,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:07,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204
2024-12-08T04:30:07,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=204
2024-12-08T04:30:07,167 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 976b979028e460504e292af924e9f145
2024-12-08T04:30:07,167 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145
2024-12-08T04:30:07,167 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742331_1507 (size=82)
2024-12-08T04:30:07,167 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742331_1507 (size=82)
2024-12-08T04:30:07,168 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742331_1507 (size=82)
2024-12-08T04:30:07,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:07,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205
2024-12-08T04:30:07,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=205
2024-12-08T04:30:07,169 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:07,169 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:07,170 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 976b979028e460504e292af924e9f145 in 172 msec
2024-12-08T04:30:07,172 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=203
2024-12-08T04:30:07,172 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69 in 174 msec
2024-12-08T04:30:07,172 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:30:07,173 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:30:07,173 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:30:07,173 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:07,174 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:07,192 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742332_1508 (size=585)
2024-12-08T04:30:07,192 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742332_1508 (size=585)
2024-12-08T04:30:07,192 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742332_1508 (size=585)
2024-12-08T04:30:07,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203
2024-12-08T04:30:07,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203
2024-12-08T04:30:07,594 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:30:07,598 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:30:07,599 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:07,611 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:30:07,611 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203
2024-12-08T04:30:07,613 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 643 msec
2024-12-08T04:30:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203
2024-12-08T04:30:08,075 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed
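The FLUSH-type snapshot that completes above (procId 203, emptySnaptb0-testExportFileSystemStateWithSkipTmp) and the snaptb0-... request that follows further down are the master-side trace of Admin.snapshot calls. A minimal client-side sketch follows; the connection handling and class name are illustrative assumptions, only the table and snapshot names come from this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // For an enabled table this takes a flush-based snapshot, matching "type=FLUSH" in the requests above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table);
      // ... after test data is written, a second snapshot is taken the same way:
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp", table);
    }
  }
}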
2024-12-08T04:30:08,081 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41743 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:30:08,084 INFO  [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46421 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. with WAL disabled. Data may be lost in the event of a crash.
2024-12-08T04:30:08,087 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,087 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:08,087 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
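The two "with WAL disabled" warnings above are what HRegion logs when a mutation is applied without a write-ahead-log entry, e.g. a Put sent with durability SKIP_WAL. A hedged sketch of such a write follows; the row key, value, and helper class are made up for illustration, while the table name, family 'cf', and qualifier 'q' match the flush output later in this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  // Assumes 'conn' is an open Connection to the cluster above.
  static void putWithoutWal(Connection conn, String row, String value) throws java.io.IOException {
    TableName tableName = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Table table = conn.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes(row))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(value));
      put.setDurability(Durability.SKIP_WAL);   // skip the write-ahead log, triggering the warning above
      table.put(put);
    }
  }
}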
2024-12-08T04:30:08,098 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }
2024-12-08T04:30:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733632208098 (current time:1733632208098).
2024-12-08T04:30:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-08T04:30:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2
2024-12-08T04:30:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-08T04:30:08,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53475a8e to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7bc88225
2024-12-08T04:30:08,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@371d0d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:30:08,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:30:08,104 INFO  [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:30:08,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53475a8e to 127.0.0.1:55878
2024-12-08T04:30:08,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:30:08,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cf55a2e to 127.0.0.1:55878 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23933a3f
2024-12-08T04:30:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e69a1f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-08T04:30:08,110 DEBUG [hconnection-0x24857731-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-08T04:30:08,111 INFO  [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-08T04:30:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1cf55a2e to 127.0.0.1:55878
2024-12-08T04:30:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:30:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]
2024-12-08T04:30:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-08T04:30:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }
2024-12-08T04:30:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206
2024-12-08T04:30:08,115 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-08T04:30:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206
2024-12-08T04:30:08,115 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-08T04:30:08,117 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-08T04:30:08,123 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742333_1509 (size=198)
2024-12-08T04:30:08,123 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742333_1509 (size=198)
2024-12-08T04:30:08,123 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742333_1509 (size=198)
2024-12-08T04:30:08,124 INFO  [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-08T04:30:08,124 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69}]
2024-12-08T04:30:08,125 INFO  [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:08,125 INFO  [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145
2024-12-08T04:30:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206
2024-12-08T04:30:08,226 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp'
2024-12-08T04:30:08,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:30:08,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:30:08,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41743 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207
2024-12-08T04:30:08,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46421 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208
2024-12-08T04:30:08,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:08,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:08,277 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 6a4f358fa009e05565077924c50ddc69 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB
2024-12-08T04:30:08,277 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 976b979028e460504e292af924e9f145 1/1 column families, dataSize=199 B heapSize=688 B
2024-12-08T04:30:08,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/.tmp/cf/7cfbb8f4cbb9465c8f114512c6e99ba9 is 71, key is 0240b9eb03d2435f97ca83f2e23d28dc/cf:q/1733632208081/Put/seqid=0
2024-12-08T04:30:08,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/.tmp/cf/d334a83da16a4ab49d8f6294d3eefd71 is 71, key is 1008445383b1621cf5d93ddcadb094dc/cf:q/1733632208084/Put/seqid=0
2024-12-08T04:30:08,315 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742334_1510 (size=5288)
2024-12-08T04:30:08,315 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742334_1510 (size=5288)
2024-12-08T04:30:08,315 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742334_1510 (size=5288)
2024-12-08T04:30:08,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742335_1511 (size=8324)
2024-12-08T04:30:08,326 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742335_1511 (size=8324)
2024-12-08T04:30:08,327 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742335_1511 (size=8324)
2024-12-08T04:30:08,332 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/.tmp/cf/d334a83da16a4ab49d8f6294d3eefd71
2024-12-08T04:30:08,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/.tmp/cf/d334a83da16a4ab49d8f6294d3eefd71 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71
2024-12-08T04:30:08,343 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71, entries=47, sequenceid=6, filesize=8.1 K
2024-12-08T04:30:08,344 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 6a4f358fa009e05565077924c50ddc69 in 66ms, sequenceid=6, compaction requested=false
2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 6a4f358fa009e05565077924c50ddc69:

2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. for snaptb0-testExportFileSystemStateWithSkipTmp completed.
2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71] hfiles
2024-12-08T04:30:08,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,352 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742336_1512 (size=121)
2024-12-08T04:30:08,352 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742336_1512 (size=121)
2024-12-08T04:30:08,352 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742336_1512 (size=121)
2024-12-08T04:30:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206
2024-12-08T04:30:08,717 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/.tmp/cf/7cfbb8f4cbb9465c8f114512c6e99ba9
2024-12-08T04:30:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206
2024-12-08T04:30:08,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/.tmp/cf/7cfbb8f4cbb9465c8f114512c6e99ba9 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9
2024-12-08T04:30:08,735 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9, entries=3, sequenceid=6, filesize=5.2 K
2024-12-08T04:30:08,737 INFO  [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 976b979028e460504e292af924e9f145 in 459ms, sequenceid=6, compaction requested=false
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 976b979028e460504e292af924e9f145:
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. for snaptb0-testExportFileSystemStateWithSkipTmp completed.
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9] hfiles
2024-12-08T04:30:08,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:08,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208
2024-12-08T04:30:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=208
2024-12-08T04:30:08,752 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:08,752 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:08,754 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 6a4f358fa009e05565077924c50ddc69 in 629 msec
2024-12-08T04:30:08,770 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742337_1513 (size=121)
2024-12-08T04:30:08,770 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742337_1513 (size=121)
2024-12-08T04:30:08,771 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742337_1513 (size=121)
2024-12-08T04:30:08,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:08,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/428ded7e54d6:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207
2024-12-08T04:30:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster(4106): Remote procedure done, pid=207
2024-12-08T04:30:08,774 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 976b979028e460504e292af924e9f145
2024-12-08T04:30:08,774 INFO  [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 976b979028e460504e292af924e9f145
2024-12-08T04:30:08,777 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206
2024-12-08T04:30:08,777 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 976b979028e460504e292af924e9f145 in 651 msec
2024-12-08T04:30:08,778 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-08T04:30:08,778 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-08T04:30:08,779 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-08T04:30:08,779 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,780 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,857 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742338_1514 (size=663)
2024-12-08T04:30:08,858 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742338_1514 (size=663)
2024-12-08T04:30:08,858 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742338_1514 (size=663)
2024-12-08T04:30:08,867 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-08T04:30:08,875 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-08T04:30:08,875 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:08,877 INFO  [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-08T04:30:08,877 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206
2024-12-08T04:30:08,879 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 764 msec
2024-12-08T04:30:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206
2024-12-08T04:30:09,219 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed
2024-12-08T04:30:09,220 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220
2024-12-08T04:30:09,220 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41407, tgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220, rawTgtDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220, srcFsUri=hdfs://localhost:41407, srcDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:30:09,267 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41407, inputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720
2024-12-08T04:30:09,267 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:09,270 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-08T04:30:09,276 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:09,343 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742339_1515 (size=198)
2024-12-08T04:30:09,343 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742339_1515 (size=198)
2024-12-08T04:30:09,344 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742339_1515 (size=198)
2024-12-08T04:30:09,365 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742340_1516 (size=663)
2024-12-08T04:30:09,366 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742340_1516 (size=663)
2024-12-08T04:30:09,367 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742340_1516 (size=663)
2024-12-08T04:30:09,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:09,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:09,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:09,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-9938562928415449948.jar
2024-12-08T04:30:10,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,735 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0009_000001 (auth:SIMPLE) from 127.0.0.1:49638
2024-12-08T04:30:10,746 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000001/launch_container.sh]
2024-12-08T04:30:10,747 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000001/container_tokens]
2024-12-08T04:30:10,747 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_1/usercache/jenkins/appcache/application_1733631992429_0009/container_1733631992429_0009_01_000001/sysfs]
2024-12-08T04:30:10,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop-10810138404749696954.jar
2024-12-08T04:30:10,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-08T04:30:10,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-08T04:30:10,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-08T04:30:10,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-08T04:30:10,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-08T04:30:10,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-08T04:30:10,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-08T04:30:10,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-08T04:30:10,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-08T04:30:10,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-08T04:30:10,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-08T04:30:10,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-08T04:30:10,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-08T04:30:10,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:30:10,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:30:10,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:30:10,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:30:10,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-08T04:30:10,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:30:10,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-08T04:30:10,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742341_1517 (size=127628)
2024-12-08T04:30:10,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742341_1517 (size=127628)
2024-12-08T04:30:10,856 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742341_1517 (size=127628)
2024-12-08T04:30:10,924 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742342_1518 (size=2172101)
2024-12-08T04:30:10,925 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742342_1518 (size=2172101)
2024-12-08T04:30:10,925 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742342_1518 (size=2172101)
2024-12-08T04:30:10,946 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742343_1519 (size=213228)
2024-12-08T04:30:10,946 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742343_1519 (size=213228)
2024-12-08T04:30:10,946 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742343_1519 (size=213228)
2024-12-08T04:30:10,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742344_1520 (size=1877034)
2024-12-08T04:30:10,958 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742344_1520 (size=1877034)
2024-12-08T04:30:10,959 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742344_1520 (size=1877034)
2024-12-08T04:30:10,970 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742345_1521 (size=533455)
2024-12-08T04:30:10,970 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742345_1521 (size=533455)
2024-12-08T04:30:10,971 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742345_1521 (size=533455)
2024-12-08T04:30:11,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742346_1522 (size=7280644)
2024-12-08T04:30:11,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742346_1522 (size=7280644)
2024-12-08T04:30:11,003 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742346_1522 (size=7280644)
2024-12-08T04:30:11,024 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742347_1523 (size=4188619)
2024-12-08T04:30:11,024 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742347_1523 (size=4188619)
2024-12-08T04:30:11,024 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742347_1523 (size=4188619)
2024-12-08T04:30:11,039 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742348_1524 (size=20406)
2024-12-08T04:30:11,040 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742348_1524 (size=20406)
2024-12-08T04:30:11,040 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742348_1524 (size=20406)
2024-12-08T04:30:11,055 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742349_1525 (size=75495)
2024-12-08T04:30:11,055 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742349_1525 (size=75495)
2024-12-08T04:30:11,056 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742349_1525 (size=75495)
2024-12-08T04:30:11,063 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742350_1526 (size=45609)
2024-12-08T04:30:11,063 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742350_1526 (size=45609)
2024-12-08T04:30:11,063 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742350_1526 (size=45609)
2024-12-08T04:30:11,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742351_1527 (size=451756)
2024-12-08T04:30:11,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742351_1527 (size=451756)
2024-12-08T04:30:11,071 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742351_1527 (size=451756)
2024-12-08T04:30:11,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742352_1528 (size=110084)
2024-12-08T04:30:11,078 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742352_1528 (size=110084)
2024-12-08T04:30:11,079 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742352_1528 (size=110084)
2024-12-08T04:30:11,093 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742353_1529 (size=1323991)
2024-12-08T04:30:11,093 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742353_1529 (size=1323991)
2024-12-08T04:30:11,094 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742353_1529 (size=1323991)
2024-12-08T04:30:11,100 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742354_1530 (size=23076)
2024-12-08T04:30:11,100 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742354_1530 (size=23076)
2024-12-08T04:30:11,100 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742354_1530 (size=23076)
2024-12-08T04:30:11,107 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742355_1531 (size=126803)
2024-12-08T04:30:11,107 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742355_1531 (size=126803)
2024-12-08T04:30:11,107 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742355_1531 (size=126803)
2024-12-08T04:30:11,114 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742356_1532 (size=322274)
2024-12-08T04:30:11,114 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742356_1532 (size=322274)
2024-12-08T04:30:11,114 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742356_1532 (size=322274)
2024-12-08T04:30:11,125 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742357_1533 (size=1832290)
2024-12-08T04:30:11,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742357_1533 (size=1832290)
2024-12-08T04:30:11,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742357_1533 (size=1832290)
2024-12-08T04:30:11,156 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742358_1534 (size=6350155)
2024-12-08T04:30:11,156 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742358_1534 (size=6350155)
2024-12-08T04:30:11,157 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742358_1534 (size=6350155)
2024-12-08T04:30:11,170 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742359_1535 (size=30081)
2024-12-08T04:30:11,170 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742359_1535 (size=30081)
2024-12-08T04:30:11,171 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742359_1535 (size=30081)
2024-12-08T04:30:11,177 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742360_1536 (size=53616)
2024-12-08T04:30:11,177 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742360_1536 (size=53616)
2024-12-08T04:30:11,177 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742360_1536 (size=53616)
2024-12-08T04:30:11,184 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742361_1537 (size=29229)
2024-12-08T04:30:11,184 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742361_1537 (size=29229)
2024-12-08T04:30:11,184 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742361_1537 (size=29229)
2024-12-08T04:30:11,193 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742362_1538 (size=169089)
2024-12-08T04:30:11,193 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742362_1538 (size=169089)
2024-12-08T04:30:11,194 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742362_1538 (size=169089)
2024-12-08T04:30:11,217 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742363_1539 (size=5175431)
2024-12-08T04:30:11,217 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742363_1539 (size=5175431)
2024-12-08T04:30:11,217 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742363_1539 (size=5175431)
2024-12-08T04:30:11,229 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742364_1540 (size=136454)
2024-12-08T04:30:11,230 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742364_1540 (size=136454)
2024-12-08T04:30:11,231 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742364_1540 (size=136454)
2024-12-08T04:30:11,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742365_1541 (size=907852)
2024-12-08T04:30:11,244 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742365_1541 (size=907852)
2024-12-08T04:30:11,245 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742365_1541 (size=907852)
2024-12-08T04:30:11,264 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742366_1542 (size=3317408)
2024-12-08T04:30:11,264 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742366_1542 (size=3317408)
2024-12-08T04:30:11,264 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742366_1542 (size=3317408)
2024-12-08T04:30:11,274 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742367_1543 (size=503880)
2024-12-08T04:30:11,274 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742367_1543 (size=503880)
2024-12-08T04:30:11,275 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742367_1543 (size=503880)
2024-12-08T04:30:11,317 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742368_1544 (size=4695811)
2024-12-08T04:30:11,318 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742368_1544 (size=4695811)
2024-12-08T04:30:11,318 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742368_1544 (size=4695811)
2024-12-08T04:30:11,319 WARN  [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
2024-12-08T04:30:11,321 INFO  [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list
2024-12-08T04:30:11,323 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-08T04:30:11,329 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742369_1545 (size=366)
2024-12-08T04:30:11,330 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742369_1545 (size=366)
2024-12-08T04:30:11,330 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742369_1545 (size=366)
2024-12-08T04:30:11,336 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742370_1546 (size=15)
2024-12-08T04:30:11,336 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742370_1546 (size=15)
2024-12-08T04:30:11,336 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742370_1546 (size=15)
2024-12-08T04:30:11,370 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742371_1547 (size=305055)
2024-12-08T04:30:11,370 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742371_1547 (size=305055)
2024-12-08T04:30:11,372 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742371_1547 (size=305055)
2024-12-08T04:30:11,391 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:30:11,391 WARN  [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-08T04:30:11,469 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0010_000001 (auth:SIMPLE) from 127.0.0.1:55466
2024-12-08T04:30:11,552 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:30:13,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:13,632 INFO  [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer
2024-12-08T04:30:13,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum
2024-12-08T04:30:16,593 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0010_000001 (auth:SIMPLE) from 127.0.0.1:33414
2024-12-08T04:30:16,854 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742372_1548 (size=350753)
2024-12-08T04:30:16,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742372_1548 (size=350753)
2024-12-08T04:30:16,855 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742372_1548 (size=350753)
2024-12-08T04:30:18,869 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0010_000001 (auth:SIMPLE) from 127.0.0.1:42328
2024-12-08T04:30:19,135 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:30:19,812 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region b0618e5cec1ba295985f16f1dd465d87, had cached 0 bytes from a total of 5356
2024-12-08T04:30:19,812 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 430b6d8c8c366152be49a2e6dcaf8f87, had cached 0 bytes from a total of 8258
2024-12-08T04:30:22,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:30:23,444 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742373_1549 (size=8324)
2024-12-08T04:30:23,444 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742373_1549 (size=8324)
2024-12-08T04:30:23,444 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742373_1549 (size=8324)
2024-12-08T04:30:23,500 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742374_1550 (size=5288)
2024-12-08T04:30:23,501 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742374_1550 (size=5288)
2024-12-08T04:30:23,501 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742374_1550 (size=5288)
2024-12-08T04:30:23,590 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742375_1551 (size=17455)
2024-12-08T04:30:23,590 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742375_1551 (size=17455)
2024-12-08T04:30:23,590 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742375_1551 (size=17455)
2024-12-08T04:30:23,613 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742376_1552 (size=476)
2024-12-08T04:30:23,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742376_1552 (size=476)
2024-12-08T04:30:23,615 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742376_1552 (size=476)
2024-12-08T04:30:23,662 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742377_1553 (size=17455)
2024-12-08T04:30:23,662 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742377_1553 (size=17455)
2024-12-08T04:30:23,662 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742377_1553 (size=17455)
2024-12-08T04:30:23,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742378_1554 (size=350753)
2024-12-08T04:30:23,699 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742378_1554 (size=350753)
2024-12-08T04:30:23,700 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742378_1554 (size=350753)
2024-12-08T04:30:25,516 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export
2024-12-08T04:30:25,516 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity.
2024-12-08T04:30:25,521 INFO  [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,521 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-08T04:30:25,522 INFO  [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-08T04:30:25,522 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,522 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo
2024-12-08T04:30:25,522 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest
2024-12-08T04:30:25,522 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1548841327_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo
2024-12-08T04:30:25,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/export-test/export-1733632209220/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest
2024-12-08T04:30:25,528 INFO  [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,528 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209
2024-12-08T04:30:25,530 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632225530"}]},"ts":"1733632225530"}
2024-12-08T04:30:25,531 INFO  [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta
2024-12-08T04:30:25,533 INFO  [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING
2024-12-08T04:30:25,534 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}]
2024-12-08T04:30:25,535 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, UNASSIGN}]
2024-12-08T04:30:25,535 INFO  [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, UNASSIGN
2024-12-08T04:30:25,535 INFO  [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, UNASSIGN
2024-12-08T04:30:25,536 INFO  [PEWorker-4 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=6a4f358fa009e05565077924c50ddc69, regionState=CLOSING, regionLocation=428ded7e54d6,46421,1733631984115
2024-12-08T04:30:25,536 INFO  [PEWorker-3 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=976b979028e460504e292af924e9f145, regionState=CLOSING, regionLocation=428ded7e54d6,41743,1733631984189
2024-12-08T04:30:25,537 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:30:25,537 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE; CloseRegionProcedure 6a4f358fa009e05565077924c50ddc69, server=428ded7e54d6,46421,1733631984115}]
2024-12-08T04:30:25,537 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-08T04:30:25,537 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=211, state=RUNNABLE; CloseRegionProcedure 976b979028e460504e292af924e9f145, server=428ded7e54d6,41743,1733631984189}]
2024-12-08T04:30:25,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209
2024-12-08T04:30:25,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,46421,1733631984115
2024-12-08T04:30:25,689 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:25,689 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:30:25,689 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 6a4f358fa009e05565077924c50ddc69, disabling compactions & flushes
2024-12-08T04:30:25,689 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:25,689 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:25,689 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69. after waiting 0 ms
2024-12-08T04:30:25,689 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:25,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 428ded7e54d6,41743,1733631984189
2024-12-08T04:30:25,689 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close 976b979028e460504e292af924e9f145
2024-12-08T04:30:25,690 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-08T04:30:25,690 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing 976b979028e460504e292af924e9f145, disabling compactions & flushes
2024-12-08T04:30:25,690 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:25,690 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:25,690 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145. after waiting 0 ms
2024-12-08T04:30:25,690 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:25,693 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:30:25,694 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:30:25,694 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:30:25,694 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.
2024-12-08T04:30:25,694 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 6a4f358fa009e05565077924c50ddc69:

2024-12-08T04:30:25,694 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:30:25,694 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.
2024-12-08T04:30:25,694 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for 976b979028e460504e292af924e9f145:

2024-12-08T04:30:25,695 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:25,695 INFO  [PEWorker-5 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=6a4f358fa009e05565077924c50ddc69, regionState=CLOSED
2024-12-08T04:30:25,696 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed 976b979028e460504e292af924e9f145
2024-12-08T04:30:25,696 INFO  [PEWorker-2 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=976b979028e460504e292af924e9f145, regionState=CLOSED
2024-12-08T04:30:25,698 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=212
2024-12-08T04:30:25,698 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=212, state=SUCCESS; CloseRegionProcedure 6a4f358fa009e05565077924c50ddc69, server=428ded7e54d6,46421,1733631984115 in 160 msec
2024-12-08T04:30:25,698 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=211
2024-12-08T04:30:25,699 INFO  [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6a4f358fa009e05565077924c50ddc69, UNASSIGN in 163 msec
2024-12-08T04:30:25,699 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=211, state=SUCCESS; CloseRegionProcedure 976b979028e460504e292af924e9f145, server=428ded7e54d6,41743,1733631984189 in 160 msec
2024-12-08T04:30:25,699 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=211, resume processing ppid=210
2024-12-08T04:30:25,699 INFO  [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=976b979028e460504e292af924e9f145, UNASSIGN in 163 msec
2024-12-08T04:30:25,701 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209
2024-12-08T04:30:25,701 INFO  [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 166 msec
2024-12-08T04:30:25,702 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733632225702"}]},"ts":"1733632225702"}
2024-12-08T04:30:25,702 INFO  [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta
2024-12-08T04:30:25,704 INFO  [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED
2024-12-08T04:30:25,705 INFO  [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 177 msec
2024-12-08T04:30:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209
2024-12-08T04:30:25,832 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed
2024-12-08T04:30:25,832 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,834 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,834 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,835 INFO  [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41743 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,837 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145
2024-12-08T04:30:25,837 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:25,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,839 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/recovered.edits]
2024-12-08T04:30:25,839 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf, FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/recovered.edits]
2024-12-08T04:30:25,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF
2024-12-08T04:30:25,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF
2024-12-08T04:30:25,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF
2024-12-08T04:30:25,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:25,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-08T04:30:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215
2024-12-08T04:30:25,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:25,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:25,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:25,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04
2024-12-08T04:30:25,843 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/cf/d334a83da16a4ab49d8f6294d3eefd71
2024-12-08T04:30:25,843 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9 to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/cf/7cfbb8f4cbb9465c8f114512c6e99ba9
2024-12-08T04:30:25,845 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69/recovered.edits/9.seqid
2024-12-08T04:30:25,845 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/recovered.edits/9.seqid to hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145/recovered.edits/9.seqid
2024-12-08T04:30:25,846 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/6a4f358fa009e05565077924c50ddc69
2024-12-08T04:30:25,846 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testtb-testExportFileSystemStateWithSkipTmp/976b979028e460504e292af924e9f145
2024-12-08T04:30:25,846 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions
2024-12-08T04:30:25,847 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,849 WARN  [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta
2024-12-08T04:30:25,851 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor.
2024-12-08T04:30:25,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states.
2024-12-08T04:30:25,852 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632225852"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:25,852 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733632225852"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:25,853 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-08T04:30:25,853 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 976b979028e460504e292af924e9f145, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733632206323.976b979028e460504e292af924e9f145.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6a4f358fa009e05565077924c50ddc69, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733632206323.6a4f358fa009e05565077924c50ddc69.', STARTKEY => '1', ENDKEY => ''}]
2024-12-08T04:30:25,853 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted.
2024-12-08T04:30:25,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733632225853"}]},"ts":"9223372036854775807"}
2024-12-08T04:30:25,855 INFO  [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META
2024-12-08T04:30:25,856 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,857 INFO  [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 24 msec
2024-12-08T04:30:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215
2024-12-08T04:30:25,942 INFO  [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed
2024-12-08T04:30:25,947 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp"

2024-12-08T04:30:25,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,949 INFO  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp"

2024-12-08T04:30:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:25,970 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=811 (was 802)
Potentially hanging thread: HFileArchiver-22
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #17
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-51
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-52
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:49270 [Waiting for operation #4]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:53898 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-21
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-50
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ApplicationMasterLauncher #18
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
	java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: hconnection-0x28111a62-shared-pool-49
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
	java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1666411814_1 at /127.0.0.1:53866 [Waiting for operation #3]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (30462390) connection to localhost/127.0.0.1:36821 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1548841327_22 at /127.0.0.1:55092 [Waiting for operation #5]
	java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
	java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
	java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
	app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
	app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
	app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
	java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
	java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
	java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
	app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
	app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 11097)
	java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
	java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36821
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Thread-7736
	java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method)
	java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276)
	java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282)
	java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324)
	java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189)
	java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177)
	java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329)
	java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396)
	app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025)
 - Thread LEAK? -, OpenFileDescriptor=812 (was 810) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=448 (was 431) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2578 (was 2756)
2024-12-08T04:30:25,970 WARN  [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500
2024-12-08T04:30:25,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster...
2024-12-08T04:30:25,977 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@584a4c3{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node}
2024-12-08T04:30:25,979 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f58eb73{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:30:25,979 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:30:25,980 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30336541{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED}
2024-12-08T04:30:25,980 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5699d755{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:30:28,763 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000002/launch_container.sh]
2024-12-08T04:30:28,763 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000002/container_tokens]
2024-12-08T04:30:28,763 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-1_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000002/sysfs]
2024-12-08T04:30:29,787 INFO  [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733631992429_0010_000001 (auth:SIMPLE) from 127.0.0.1:33834
2024-12-08T04:30:29,799 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000001/launch_container.sh]
2024-12-08T04:30:29,799 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000001/container_tokens]
2024-12-08T04:30:29,799 WARN  [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/MiniMRCluster_623418969/yarn-1876987114/MiniMRCluster_623418969-localDir-nm-0_0/usercache/jenkins/appcache/application_1733631992429_0010/container_1733631992429_0010_01_000001/sysfs]
2024-12-08T04:30:31,215 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:30:33,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp
2024-12-08T04:30:39,134 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:30:42,991 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@594b2648{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node}
2024-12-08T04:30:42,992 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64af6801{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:30:42,992 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:30:42,992 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57cb4dc1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED}
2024-12-08T04:30:42,992 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c5b84e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:30:52,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-08T04:30:59,999 ERROR [Thread[Thread-418,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted
2024-12-08T04:30:59,999 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a6658ca{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster}
2024-12-08T04:31:00,000 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f0174e7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:31:00,000 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:31:00,000 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39328466{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED}
2024-12-08T04:31:00,000 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37c1c2de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:31:00,004 WARN  [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning.
2024-12-08T04:31:00,009 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException
2024-12-08T04:31:00,009 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted
2024-12-08T04:31:00,012 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741830_1006 (size=946710)
2024-12-08T04:31:00,012 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741830_1006 (size=946710)
2024-12-08T04:31:00,012 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741830_1006 (size=946710)
2024-12-08T04:31:00,014 ERROR [Thread[Thread-442,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted
2024-12-08T04:31:00,019 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8315674{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory}
2024-12-08T04:31:00,020 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fac6a92{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:31:00,021 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:31:00,021 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79f1c1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED}
2024-12-08T04:31:00,021 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d920d82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:31:00,023 ERROR [Thread[Thread-400,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted
2024-12-08T04:31:00,023 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped
2024-12-08T04:31:00,024 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-08T04:31:00,024 INFO  [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-08T04:31:00,024 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d29c4c8 to 127.0.0.1:55878
2024-12-08T04:31:00,024 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,024 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-08T04:31:00,024 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1958073676, stopped=false
2024-12-08T04:31:00,025 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,025 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-12-08T04:31:00,025 INFO  [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=428ded7e54d6,46337,1733631983069
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:31:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:31:00,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:31:00,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:31:00,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:31:00,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-08T04:31:00,033 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1136): Closing user regions
2024-12-08T04:31:00,033 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1136): Closing user regions
2024-12-08T04:31:00,033 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1136): Closing user regions
2024-12-08T04:31:00,033 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(3579): Received CLOSE for 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:31:00,033 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(3579): Received CLOSE for b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:31:00,033 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3579): Received CLOSE for 092cf4729ca6e7ca2b7aa78df922ed6c
2024-12-08T04:31:00,033 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3579): Received CLOSE for ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b0618e5cec1ba295985f16f1dd465d87, disabling compactions & flushes
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 092cf4729ca6e7ca2b7aa78df922ed6c, disabling compactions & flushes
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 430b6d8c8c366152be49a2e6dcaf8f87, disabling compactions & flushes
2024-12-08T04:31:00,034 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:31:00,034 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:31:00,034 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87. after waiting 0 ms
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c. after waiting 0 ms
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87. after waiting 0 ms
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:31:00,034 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:31:00,034 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 092cf4729ca6e7ca2b7aa78df922ed6c 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-08T04:31:00,036 INFO  [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-08T04:31:00,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,038 INFO  [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '428ded7e54d6,45955,1733631983994' *****
2024-12-08T04:31:00,038 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,039 INFO  [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-08T04:31:00,039 INFO  [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '428ded7e54d6,46421,1733631984115' *****
2024-12-08T04:31:00,039 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,039 INFO  [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-08T04:31:00,039 INFO  [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '428ded7e54d6,41743,1733631984189' *****
2024-12-08T04:31:00,039 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,039 INFO  [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-08T04:31:00,039 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T04:31:00,039 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-08T04:31:00,039 INFO  [RS:1;428ded7e54d6:46421 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:0;428ded7e54d6:45955 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:1;428ded7e54d6:46421 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:0;428ded7e54d6:45955 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-08T04:31:00,039 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1224): stopping server 428ded7e54d6,46421,1733631984115
2024-12-08T04:31:00,039 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1224): stopping server 428ded7e54d6,45955,1733631983994
2024-12-08T04:31:00,039 DEBUG [RS:1;428ded7e54d6:46421 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,039 DEBUG [RS:0;428ded7e54d6:45955 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3581): Received CLOSE for the region: ab3154ca90ccc96a74d87ae33022559e, which we are already trying to CLOSE, but not completed yet
2024-12-08T04:31:00,039 INFO  [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-08T04:31:00,039 INFO  [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1224): stopping server 428ded7e54d6,41743,1733631984189
2024-12-08T04:31:00,039 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close
2024-12-08T04:31:00,039 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close
2024-12-08T04:31:00,039 DEBUG [RS:2;428ded7e54d6:41743 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,039 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1603): Online Regions={b0618e5cec1ba295985f16f1dd465d87=testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.}
2024-12-08T04:31:00,039 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1603): Online Regions={430b6d8c8c366152be49a2e6dcaf8f87=testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.}
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T04:31:00,039 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-08T04:31:00,042 INFO  [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-08T04:31:00,043 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-08T04:31:00,043 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 092cf4729ca6e7ca2b7aa78df922ed6c=hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c., ab3154ca90ccc96a74d87ae33022559e=hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.}
2024-12-08T04:31:00,043 DEBUG [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1629): Waiting on b0618e5cec1ba295985f16f1dd465d87
2024-12-08T04:31:00,043 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-08T04:31:00,043 DEBUG [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1629): Waiting on 430b6d8c8c366152be49a2e6dcaf8f87
2024-12-08T04:31:00,043 DEBUG [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1629): Waiting on 092cf4729ca6e7ca2b7aa78df922ed6c, 1588230740, ab3154ca90ccc96a74d87ae33022559e
2024-12-08T04:31:00,043 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-08T04:31:00,043 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/430b6d8c8c366152be49a2e6dcaf8f87/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:31:00,043 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-08T04:31:00,043 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-08T04:31:00,043 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-08T04:31:00,043 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB
2024-12-08T04:31:00,044 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,044 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:31:00,044 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 430b6d8c8c366152be49a2e6dcaf8f87:
2024-12-08T04:31:00,044 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87.
2024-12-08T04:31:00,049 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/default/testExportExpiredSnapshot/b0618e5cec1ba295985f16f1dd465d87/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-08T04:31:00,049 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,049 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:31:00,050 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b0618e5cec1ba295985f16f1dd465d87:
2024-12-08T04:31:00,050 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733632129462.b0618e5cec1ba295985f16f1dd465d87.
2024-12-08T04:31:00,055 INFO  [regionserver/428ded7e54d6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,061 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/.tmp/info/a032268a84484a36b5f1249503d71e7c is 45, key is default/info:d/1733631987758/Put/seqid=0
2024-12-08T04:31:00,066 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742379_1555 (size=5037)
2024-12-08T04:31:00,066 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742379_1555 (size=5037)
2024-12-08T04:31:00,066 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742379_1555 (size=5037)
2024-12-08T04:31:00,067 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/.tmp/info/a032268a84484a36b5f1249503d71e7c
2024-12-08T04:31:00,075 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/.tmp/info/a032268a84484a36b5f1249503d71e7c as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/info/a032268a84484a36b5f1249503d71e7c
2024-12-08T04:31:00,078 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/info/4ccf57c382444cad83eec293139f69cb is 173, key is testExportExpiredSnapshot,1,1733632129462.430b6d8c8c366152be49a2e6dcaf8f87./info:regioninfo/1733632129825/Put/seqid=0
2024-12-08T04:31:00,081 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/info/a032268a84484a36b5f1249503d71e7c, entries=2, sequenceid=6, filesize=4.9 K
2024-12-08T04:31:00,082 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 092cf4729ca6e7ca2b7aa78df922ed6c in 48ms, sequenceid=6, compaction requested=false
2024-12-08T04:31:00,083 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742380_1556 (size=15630)
2024-12-08T04:31:00,084 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742380_1556 (size=15630)
2024-12-08T04:31:00,084 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742380_1556 (size=15630)
2024-12-08T04:31:00,085 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/info/4ccf57c382444cad83eec293139f69cb
2024-12-08T04:31:00,087 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/namespace/092cf4729ca6e7ca2b7aa78df922ed6c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,088 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 092cf4729ca6e7ca2b7aa78df922ed6c:
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733631986698.092cf4729ca6e7ca2b7aa78df922ed6c.
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ab3154ca90ccc96a74d87ae33022559e, disabling compactions & flushes
2024-12-08T04:31:00,088 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e. after waiting 0 ms
2024-12-08T04:31:00,088 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:31:00,088 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ab3154ca90ccc96a74d87ae33022559e 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB
2024-12-08T04:31:00,109 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/rep_barrier/e76a494435f541c286c40f5149535d7f is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e./rep_barrier:/1733632127606/DeleteFamily/seqid=0
2024-12-08T04:31:00,109 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/.tmp/l/2a9b7c4e901e4e7ba9f05d60d7ffa814 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733632127561/DeleteFamily/seqid=0
2024-12-08T04:31:00,111 INFO  [regionserver/428ded7e54d6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,112 INFO  [regionserver/428ded7e54d6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742381_1557 (size=5695)
2024-12-08T04:31:00,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742381_1557 (size=5695)
2024-12-08T04:31:00,118 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742381_1557 (size=5695)
2024-12-08T04:31:00,119 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/.tmp/l/2a9b7c4e901e4e7ba9f05d60d7ffa814
2024-12-08T04:31:00,122 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2a9b7c4e901e4e7ba9f05d60d7ffa814
2024-12-08T04:31:00,123 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/.tmp/l/2a9b7c4e901e4e7ba9f05d60d7ffa814 as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/l/2a9b7c4e901e4e7ba9f05d60d7ffa814
2024-12-08T04:31:00,125 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742382_1558 (size=8007)
2024-12-08T04:31:00,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742382_1558 (size=8007)
2024-12-08T04:31:00,126 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742382_1558 (size=8007)
2024-12-08T04:31:00,126 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/rep_barrier/e76a494435f541c286c40f5149535d7f
2024-12-08T04:31:00,127 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2a9b7c4e901e4e7ba9f05d60d7ffa814
2024-12-08T04:31:00,128 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/l/2a9b7c4e901e4e7ba9f05d60d7ffa814, entries=12, sequenceid=27, filesize=5.6 K
2024-12-08T04:31:00,128 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for ab3154ca90ccc96a74d87ae33022559e in 40ms, sequenceid=27, compaction requested=false
2024-12-08T04:31:00,131 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/acl/ab3154ca90ccc96a74d87ae33022559e/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1
2024-12-08T04:31:00,132 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,132 INFO  [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:31:00,132 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ab3154ca90ccc96a74d87ae33022559e:
2024-12-08T04:31:00,132 DEBUG [RS_CLOSE_REGION-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733631987898.ab3154ca90ccc96a74d87ae33022559e.
2024-12-08T04:31:00,145 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/table/acb503d1e29240d5812efd6a2b8e5a1b is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733632107500.b371213db39acba44c12b50885d6398e./table:/1733632127606/DeleteFamily/seqid=0
2024-12-08T04:31:00,149 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073742383_1559 (size=8861)
2024-12-08T04:31:00,149 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073742383_1559 (size=8861)
2024-12-08T04:31:00,149 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073742383_1559 (size=8861)
2024-12-08T04:31:00,150 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/table/acb503d1e29240d5812efd6a2b8e5a1b
2024-12-08T04:31:00,153 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/info/4ccf57c382444cad83eec293139f69cb as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/info/4ccf57c382444cad83eec293139f69cb
2024-12-08T04:31:00,157 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/info/4ccf57c382444cad83eec293139f69cb, entries=84, sequenceid=202, filesize=15.3 K
2024-12-08T04:31:00,158 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/rep_barrier/e76a494435f541c286c40f5149535d7f as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/rep_barrier/e76a494435f541c286c40f5149535d7f
2024-12-08T04:31:00,161 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/rep_barrier/e76a494435f541c286c40f5149535d7f, entries=21, sequenceid=202, filesize=7.8 K
2024-12-08T04:31:00,162 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/.tmp/table/acb503d1e29240d5812efd6a2b8e5a1b as hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/table/acb503d1e29240d5812efd6a2b8e5a1b
2024-12-08T04:31:00,166 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/table/acb503d1e29240d5812efd6a2b8e5a1b, entries=38, sequenceid=202, filesize=8.7 K
2024-12-08T04:31:00,166 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 123ms, sequenceid=202, compaction requested=false
2024-12-08T04:31:00,170 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1
2024-12-08T04:31:00,170 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:00,170 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-08T04:31:00,170 INFO  [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-08T04:31:00,170 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-08T04:31:00,170 DEBUG [RS_CLOSE_META-regionserver/428ded7e54d6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-08T04:31:00,243 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1250): stopping server 428ded7e54d6,45955,1733631983994; all regions closed.
2024-12-08T04:31:00,243 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1250): stopping server 428ded7e54d6,41743,1733631984189; all regions closed.
2024-12-08T04:31:00,243 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1250): stopping server 428ded7e54d6,46421,1733631984115; all regions closed.
2024-12-08T04:31:00,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741833_1009 (size=11510)
2024-12-08T04:31:00,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741833_1009 (size=11510)
2024-12-08T04:31:00,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741835_1011 (size=11814)
2024-12-08T04:31:00,247 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741833_1009 (size=11510)
2024-12-08T04:31:00,248 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741836_1012 (size=80694)
2024-12-08T04:31:00,248 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741835_1011 (size=11814)
2024-12-08T04:31:00,248 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741836_1012 (size=80694)
2024-12-08T04:31:00,249 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741835_1011 (size=11814)
2024-12-08T04:31:00,249 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741836_1012 (size=80694)
2024-12-08T04:31:00,251 DEBUG [RS:1;428ded7e54d6:46421 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 428ded7e54d6%2C46421%2C1733631984115:(num 1733631986018)
2024-12-08T04:31:00,252 DEBUG [RS:1;428ded7e54d6:46421 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,252 DEBUG [RS:0;428ded7e54d6:45955 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 428ded7e54d6%2C45955%2C1733631983994:(num 1733631986018)
2024-12-08T04:31:00,252 DEBUG [RS:0;428ded7e54d6:45955 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,252 DEBUG [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs
2024-12-08T04:31:00,252 INFO  [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 428ded7e54d6%2C41743%2C1733631984189.meta:.meta(num 1733631986437)
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] hbase.ChoreService(370): Chore service for: regionserver/428ded7e54d6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] hbase.ChoreService(370): Chore service for: regionserver/428ded7e54d6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-08T04:31:00,252 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T04:31:00,252 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-08T04:31:00,252 INFO  [regionserver/428ded7e54d6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T04:31:00,253 INFO  [regionserver/428ded7e54d6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T04:31:00,253 INFO  [RS:0;428ded7e54d6:45955 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45955
2024-12-08T04:31:00,254 INFO  [RS:1;428ded7e54d6:46421 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46421
2024-12-08T04:31:00,255 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46763 is added to blk_1073741834_1010 (size=14950)
2024-12-08T04:31:00,255 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36937 is added to blk_1073741834_1010 (size=14950)
2024-12-08T04:31:00,255 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38289 is added to blk_1073741834_1010 (size=14950)
2024-12-08T04:31:00,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T04:31:00,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/428ded7e54d6,46421,1733631984115
2024-12-08T04:31:00,259 DEBUG [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/oldWALs
2024-12-08T04:31:00,259 INFO  [RS:2;428ded7e54d6:41743 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 428ded7e54d6%2C41743%2C1733631984189:(num 1733631986018)
2024-12-08T04:31:00,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/428ded7e54d6,45955,1733631983994
2024-12-08T04:31:00,259 DEBUG [RS:2;428ded7e54d6:41743 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,259 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.LeaseManager(133): Closed leases
2024-12-08T04:31:00,259 INFO  [RS:2;428ded7e54d6:41743 {}] hbase.ChoreService(370): Chore service for: regionserver/428ded7e54d6:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-08T04:31:00,259 INFO  [regionserver/428ded7e54d6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T04:31:00,259 INFO  [RS:2;428ded7e54d6:41743 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41743
2024-12-08T04:31:00,260 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [428ded7e54d6,45955,1733631983994]
2024-12-08T04:31:00,260 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 428ded7e54d6,45955,1733631983994; numProcessing=1
2024-12-08T04:31:00,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-08T04:31:00,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/428ded7e54d6,41743,1733631984189
2024-12-08T04:31:00,262 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/428ded7e54d6,45955,1733631983994 already deleted, retry=false
2024-12-08T04:31:00,262 INFO  [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 428ded7e54d6,45955,1733631983994 expired; onlineServers=2
2024-12-08T04:31:00,262 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [428ded7e54d6,46421,1733631984115]
2024-12-08T04:31:00,262 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 428ded7e54d6,46421,1733631984115; numProcessing=2
2024-12-08T04:31:00,264 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/428ded7e54d6,46421,1733631984115 already deleted, retry=false
2024-12-08T04:31:00,264 INFO  [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 428ded7e54d6,46421,1733631984115 expired; onlineServers=1
2024-12-08T04:31:00,265 INFO  [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [428ded7e54d6,41743,1733631984189]
2024-12-08T04:31:00,265 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 428ded7e54d6,41743,1733631984189; numProcessing=3
2024-12-08T04:31:00,266 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/428ded7e54d6,41743,1733631984189 already deleted, retry=false
2024-12-08T04:31:00,266 INFO  [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 428ded7e54d6,41743,1733631984189 expired; onlineServers=0
2024-12-08T04:31:00,266 INFO  [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '428ded7e54d6,46337,1733631983069' *****
2024-12-08T04:31:00,266 INFO  [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-08T04:31:00,266 DEBUG [M:0;428ded7e54d6:46337 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48215b3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=428ded7e54d6/172.17.0.2:0
2024-12-08T04:31:00,266 INFO  [M:0;428ded7e54d6:46337 {}] regionserver.HRegionServer(1224): stopping server 428ded7e54d6,46337,1733631983069
2024-12-08T04:31:00,266 INFO  [M:0;428ded7e54d6:46337 {}] regionserver.HRegionServer(1250): stopping server 428ded7e54d6,46337,1733631983069; all regions closed.
2024-12-08T04:31:00,266 DEBUG [M:0;428ded7e54d6:46337 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T04:31:00,266 DEBUG [M:0;428ded7e54d6:46337 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-08T04:31:00,266 WARN  [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-08T04:31:00,266 DEBUG [M:0;428ded7e54d6:46337 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-08T04:31:00,266 DEBUG [master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.small.0-1733631985622 {}] cleaner.HFileCleaner(306): Exit Thread[master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.small.0-1733631985622,5,FailOnTimeoutGroup]
2024-12-08T04:31:00,266 DEBUG [master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.large.0-1733631985622 {}] cleaner.HFileCleaner(306): Exit Thread[master/428ded7e54d6:0:becomeActiveMaster-HFileCleaner.large.0-1733631985622,5,FailOnTimeoutGroup]
2024-12-08T04:31:00,267 INFO  [M:0;428ded7e54d6:46337 {}] hbase.ChoreService(370): Chore service for: master/428ded7e54d6:0 had [] on shutdown
2024-12-08T04:31:00,267 DEBUG [M:0;428ded7e54d6:46337 {}] master.HMaster(1733): Stopping service threads
2024-12-08T04:31:00,267 INFO  [M:0;428ded7e54d6:46337 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-08T04:31:00,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-08T04:31:00,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-08T04:31:00,267 INFO  [M:0;428ded7e54d6:46337 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-08T04:31:00,268 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-08T04:31:00,268 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-08T04:31:00,268 DEBUG [M:0;428ded7e54d6:46337 {}] zookeeper.ZKUtil(347): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-08T04:31:00,268 WARN  [M:0;428ded7e54d6:46337 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-08T04:31:00,268 INFO  [M:0;428ded7e54d6:46337 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-08T04:31:00,268 INFO  [M:0;428ded7e54d6:46337 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-08T04:31:00,268 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-08T04:31:00,282 INFO  [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:31:00,282 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:31:00,282 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-08T04:31:00,282 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-08T04:31:00,282 INFO  [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.26 KB heapSize=966.46 KB
2024-12-08T04:31:00,283 ERROR [AsyncFSWAL-0-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:31:00,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,360 INFO  [RS:1;428ded7e54d6:46421 {}] regionserver.HRegionServer(1307): Exiting; stopping=428ded7e54d6,46421,1733631984115; zookeeper connection closed.
2024-12-08T04:31:00,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46421-0x1006fe072e80002, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,360 INFO  [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@102199c4 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@102199c4
2024-12-08T04:31:00,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45955-0x1006fe072e80001, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,361 INFO  [RS:0;428ded7e54d6:45955 {}] regionserver.HRegionServer(1307): Exiting; stopping=428ded7e54d6,45955,1733631983994; zookeeper connection closed.
2024-12-08T04:31:00,361 INFO  [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10bec3f {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10bec3f
2024-12-08T04:31:00,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41743-0x1006fe072e80003, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:31:00,364 INFO  [RS:2;428ded7e54d6:41743 {}] regionserver.HRegionServer(1307): Exiting; stopping=428ded7e54d6,41743,1733631984189; zookeeper connection closed.
2024-12-08T04:31:00,364 INFO  [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1915fc41 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1915fc41
2024-12-08T04:31:00,364 INFO  [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-08T04:31:03,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:03,632 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-08T04:31:03,633 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-12-08T04:31:05,559 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:31:22,004 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T04:31:24,314 DEBUG [master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-12-08T04:31:24,321 DEBUG [master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-08T04:31:32,006 WARN  [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-08T04:31:52,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;428ded7e54d6:46337
225 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 18
  Waited count: 14
  Waiting on java.lang.ref.ReferenceQueue$Lock@6bbd803
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 15
  Waited count: 18
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 20
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 17
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ff45b9e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3446
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 35
  Waiting on java.util.concurrent.CountDownLatch$Sync@409049f9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12117
  Waited count: 12666
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 10
  Waited count: 11
  Waiting on java.lang.ref.ReferenceQueue$Lock@3056cb7e
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@4d034e41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 683
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp2988114-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp2988114-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp2988114-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp2988114-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2988114-41-acceptor-0@2a09dac-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2988114-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2988114-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2988114-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-4898edba-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 27
  Waited count: 3058
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef7501
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41407):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 36
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 114
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 114
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33637
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 4
  Waited count: 1513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2821ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 54
  Waited count: 2098
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 49
  Waited count: 2115
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 53
  Waited count: 2113
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 60
  Waited count: 2105
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 2118
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 171
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 13
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1072063765-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1072063765-88-acceptor-0@4c0c4250-ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1072063765-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1072063765-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-5672d73d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@47b57b32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 680
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44671):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 35
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 289
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a781124
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1245
  Waited count: 1359
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ab9638):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 342
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 341
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 113 (IPC Client (30462390) connection to localhost/127.0.0.1:41407 from jenkins):
  State: RUNNABLE
  Blocked count: 1295
  Waited count: 1295
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:132)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82)
    java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82)
    app//org.apache.hadoop.ipc.Client$Connection$PingInputStream.read(Client.java:518)
    java.base@17.0.11/java.io.DataInputStream.readInt(DataInputStream.java:381)
    app//org.apache.hadoop.ipc.Client$IpcStreams.readResponse(Client.java:1923)
    app//org.apache.hadoop.ipc.Client$Connection.receiveRpcResponse(Client.java:1203)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1094)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1900
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp440766313-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp440766313-122-acceptor-0@1311a7c5-ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp440766313-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp440766313-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-16c500e0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1738d90b):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 679
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 130 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 131 (IPC Server idle connection scanner for port 37189):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 35
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 133 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 278
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b92a9d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 138 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1235
  Waited count: 1350
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 139 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@28ee108f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 132 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 129 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 140 (IPC Server handler 0 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 1 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 2 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 3 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 144 (IPC Server handler 4 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 340
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 5
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 152 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 158 (pool-39-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (qtp1849637494-161):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (qtp1849637494-162-acceptor-0@63906456-ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (qtp1849637494-163):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 165 (qtp1849637494-165):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 166 (Session-HouseKeeper-27c68f4d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 189 (pool-23-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 190 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 193 (java.util.concurrent.ThreadPoolExecutor$Worker@56ec85f[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@324b4eae[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3862465c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 678
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 200 (IPC Server idle connection scanner for port 34801):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 35
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 202 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 274
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16ba07ee
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 206 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1232
  Waited count: 1358
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (pool-46-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1ec78693):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3c782deb[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55878):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 170
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 33
  Waited count: 726
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7892c0b7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:55878):):
  State: WAITING
  Blocked count: 0
  Waited count: 826
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6699e2fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 872
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21932026
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  Inactive
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33adabda
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 268
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:55878)):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 57
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22a0a024
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 37
  Waited count: 88
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 1
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 2
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 2
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 2
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 3
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@35b2967
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 293
  Waited count: 1068
  Waiting on java.util.concurrent.Semaphore$NonfairSync@48909076
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 11
  Waited count: 83
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6f96b8c0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 76
  Waited count: 6057
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75bfaa0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@114ed3d9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4b442988
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@139244ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@41889df6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 332 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 73
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;428ded7e54d6:46337):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 2765
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$959/0x00007eff88ef0480.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569)
Thread 355 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 359 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (org.apache.hadoop.hdfs.PeerCache@5df40f7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 112
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 380 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3354
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 397 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 79
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 398 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 90
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 55
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 421 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 409 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33465
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 430 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 28
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 431 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 454 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 9
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@313bc781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 474 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 12
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24c98781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 476 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 27
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2655cf2a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 484 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 16
  Waited count: 33
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@619c3790
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 508 (LeaseRenewer:jenkins.hfs.0@localhost:41407):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 351
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 511 (LeaseRenewer:jenkins.hfs.1@localhost:41407):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 348
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 513 (LeaseRenewer:jenkins.hfs.2@localhost:41407):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 348
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 529 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 556 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33220
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 569 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 591 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 592 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 593 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 595 (ForkJoinPool.commonPool-worker-2):
  State: WAITING
  Blocked count: 0
  Waited count: 602
  Waiting on java.util.concurrent.ForkJoinPool@66e9883d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 1015 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 416
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1076 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 11
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1104 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1115 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 61
  Waited count: 93
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1173 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1174 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1282 (ForkJoinPool.commonPool-worker-3):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 695
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 1531 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@47af5010
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1854 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1913 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1914 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2824 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4147 (ForkJoinPool.commonPool-worker-4):
  State: WAITING
  Blocked count: 0
  Waited count: 264
  Waiting on java.util.concurrent.ForkJoinPool@66e9883d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9175 (AsyncFSWAL-1-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dad80b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9179 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
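Note on the dump format: each per-thread summary above (thread id and name, State, Blocked count, Waited count, the lock being waited on, and the stack) corresponds one-to-one with the fields exposed by java.lang.management.ThreadInfo. The following is only a minimal, assumed sketch of how output in this shape could be generated with the standard management API; it is not HBase's own ReflectionUtils.printThreadInfo implementation.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadDumpSketch {
      public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // Dump every live thread with its full stack; blocked/waited counts are
        // maintained by the JVM and need no extra monitoring to be read.
        for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
          System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
          System.out.println("  State: " + info.getThreadState());
          System.out.println("  Blocked count: " + info.getBlockedCount());
          System.out.println("  Waited count: " + info.getWaitedCount());
          if (info.getLockName() != null) {
            System.out.println("  Waiting on " + info.getLockName());
          }
          System.out.println("  Stack:");
          for (StackTraceElement frame : info.getStackTrace()) {
            // StackTraceElement.toString() yields the module-qualified frames
            // seen in this log, e.g. java.base@17.0.11/java.lang.Thread.run(...)
            System.out.println("    " + frame);
          }
        }
      }
    }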
2024-12-08T04:32:22,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.
2024-12-08T04:32:52,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.
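Note on the two DEBUG lines above: they come from HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer and recur about every 30 seconds (04:32:22 and 04:32:52). The message indicates a reflective lookup of a field named threadGroup that is absent on newer Hadoop releases (see HBASE-27595), which the fixer treats as non-fatal and only logs at DEBUG. The following is an assumed illustration of that failure mode against a hypothetical stand-in class, not the actual HBase code.

    // Assumed sketch of the reflective lookup behind the "NoSuchFieldException: threadGroup" lines.
    public class ThreadGroupLookupSketch {
      // Hypothetical stand-in for the Hadoop-internal FsDatasetAsyncDiskService class,
      // which in newer Hadoop releases no longer declares a "threadGroup" field.
      static class FsDatasetAsyncDiskServiceStandIn {
      }

      public static void main(String[] args) {
        try {
          // getDeclaredField throws NoSuchFieldException when the field was removed upstream.
          java.lang.reflect.Field f =
              FsDatasetAsyncDiskServiceStandIn.class.getDeclaredField("threadGroup");
          f.setAccessible(true);
          System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
          // Treated as non-fatal: the periodic check just logs and tries again later,
          // which is why the DEBUG message repeats in this log.
          System.out.println("NoSuchFieldException: " + e.getMessage()
              + "; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595");
        }
      }
    }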
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;428ded7e54d6:46337
220 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 18
  Waited count: 14
  Waiting on java.lang.ref.ReferenceQueue$Lock@6bbd803
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 15
  Waited count: 19
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 23
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 20
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ff45b9e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4046
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 41
  Waiting on java.util.concurrent.CountDownLatch$Sync@69de1dd2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12117
  Waited count: 12667
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 10
  Waited count: 11
  Waiting on java.lang.ref.ReferenceQueue$Lock@3056cb7e
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@4d034e41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 803
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 81
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp2988114-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp2988114-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp2988114-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp2988114-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2988114-41-acceptor-0@2a09dac-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2988114-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2988114-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2988114-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-4898edba-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 27
  Waited count: 3058
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef7501
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41407):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 42
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 81
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 134
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 81
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 134
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 39602
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 4
  Waited count: 1513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2821ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 54
  Waited count: 2160
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 49
  Waited count: 2176
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 53
  Waited count: 2174
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 60
  Waited count: 2166
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 2179
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 201
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 81
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 15
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1072063765-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1072063765-88-acceptor-0@4c0c4250-ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1072063765-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1072063765-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-5672d73d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@47b57b32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 800
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44671):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 80
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 309
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a781124
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1265
  Waited count: 1399
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ab9638):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 402
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 401
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 113 (IPC Client (30462390) connection to localhost/127.0.0.1:41407 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1355
  Waited count: 1356
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1960
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp440766313-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp440766313-122-acceptor-0@1311a7c5-ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp440766313-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp440766313-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-16c500e0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1738d90b):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 799
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 130 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 131 (IPC Server idle connection scanner for port 37189):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 133 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 80
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 298
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b92a9d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 138 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1255
  Waited count: 1390
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 139 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@28ee108f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 132 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 129 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 140 (IPC Server handler 0 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 1 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 2 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 3 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 144 (IPC Server handler 4 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 400
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 5
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 152 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 158 (pool-39-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (qtp1849637494-161):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (qtp1849637494-162-acceptor-0@63906456-ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (qtp1849637494-163):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 165 (qtp1849637494-165):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 166 (Session-HouseKeeper-27c68f4d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 189 (pool-23-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 190 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 193 (java.util.concurrent.ThreadPoolExecutor$Worker@56ec85f[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@324b4eae[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3862465c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 798
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 200 (IPC Server idle connection scanner for port 34801):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 202 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 80
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 294
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16ba07ee
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 206 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1252
  Waited count: 1398
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (pool-46-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1ec78693):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 399
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 399
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 399
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 399
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 399
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3c782deb[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 14
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55878):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 200
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 33
  Waited count: 731
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7892c0b7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:55878):):
  State: WAITING
  Blocked count: 0
  Waited count: 831
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6699e2fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 877
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21932026
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33adabda
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 296
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:55878)):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 57
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22a0a024
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 37
  Waited count: 88
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 1
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 2
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 2
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@35b2967
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 293
  Waited count: 1068
  Waiting on java.util.concurrent.Semaphore$NonfairSync@48909076
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 11
  Waited count: 83
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6f96b8c0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 76
  Waited count: 6057
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75bfaa0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@114ed3d9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4b442988
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@139244ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@41889df6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 332 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 73
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;428ded7e54d6:46337):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 2765
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$959/0x00007eff88ef0480.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569)
Thread 355 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 359 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (org.apache.hadoop.hdfs.PeerCache@5df40f7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 132
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 380 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3954
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 397 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 79
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 398 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 90
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 68
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 421 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 409 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 39467
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 430 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 28
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 431 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 454 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 9
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@313bc781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 474 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 12
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24c98781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 476 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 27
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2655cf2a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 484 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 16
  Waited count: 33
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@619c3790
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 529 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 556 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 39223
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 569 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 591 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 592 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 593 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 595 (ForkJoinPool.commonPool-worker-2):
  State: WAITING
  Blocked count: 0
  Waited count: 602
  Waiting on java.util.concurrent.ForkJoinPool@66e9883d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 1015 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 422
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1076 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 11
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1104 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1115 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 61
  Waited count: 93
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1173 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1174 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1531 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@47af5010
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1854 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1913 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1914 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2824 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4147 (ForkJoinPool.commonPool-worker-4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 265
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9175 (AsyncFSWAL-1-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dad80b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9179 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
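The per-thread entries in these dumps (name, state, blocked count, waited count, stack) are the fields exposed by the JVM's ThreadMXBean; the dumping thread's own stack further down shows sun.management.ThreadImpl.getThreadInfo being called from ReflectionUtils.printThreadInfo. A minimal stand-alone sketch that prints a similar report using only java.lang.management (this is not the HBase helper that produced this log):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class MiniThreadDump {
      public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // Query every live thread, keeping up to 20 stack frames per thread.
        for (ThreadInfo info : mx.getThreadInfo(mx.getAllThreadIds(), 20)) {
          if (info == null) {
            continue; // thread exited between the id snapshot and the query
          }
          System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
          System.out.println("  State: " + info.getThreadState());
          System.out.println("  Blocked count: " + info.getBlockedCount());
          System.out.println("  Waited count: " + info.getWaitedCount());
          System.out.println("  Stack:");
          for (StackTraceElement frame : info.getStackTrace()) {
            System.out.println("    " + frame);
          }
        }
      }
    }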
2024-12-08T04:33:22,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T04:33:52,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
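The FsDatasetAsyncDiskServiceFixer messages above report a reflection failure: per the message, on Hadoop releases newer than 3.2.3 or 3.3.4 the private field named threadGroup that the fixer looks up no longer exists, so the lookup throws NoSuchFieldException (HBASE-27595). A minimal sketch of that failure mode follows; AsyncDiskServiceStandIn is made up for illustration and is not the Hadoop class the fixer actually inspects:

    import java.lang.reflect.Field;

    public class ReflectiveFieldProbe {
      // Stand-in for a library class whose internals changed between releases;
      // note that no field named "threadGroup" is declared here.
      static class AsyncDiskServiceStandIn {
        private final String name = "async-disk-service";
      }

      public static void main(String[] args) {
        try {
          Field f = AsyncDiskServiceStandIn.class.getDeclaredField("threadGroup");
          f.setAccessible(true);
          System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
          // This is the condition the fixer logs periodically: the expected private
          // field is simply not declared on this version of the class.
          System.out.println("NoSuchFieldException: " + e.getMessage());
        }
      }
    }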
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;428ded7e54d6:46337
219 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 18
  Waited count: 14
  Waiting on java.lang.ref.ReferenceQueue$Lock@6bbd803
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 15
  Waited count: 20
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 26
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 23
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ff45b9e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4646
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 47
  Waiting on java.util.concurrent.CountDownLatch$Sync@544cff29
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12117
  Waited count: 12668
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 10
  Waited count: 11
  Waiting on java.lang.ref.ReferenceQueue$Lock@3056cb7e
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@4d034e41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 923
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 93
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp2988114-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp2988114-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp2988114-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp2988114-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2988114-41-acceptor-0@2a09dac-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2988114-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2988114-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2988114-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-4898edba-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 27
  Waited count: 3058
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef7501
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41407):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 48
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 93
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 154
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 93
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 154
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 45567
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 4
  Waited count: 1513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2821ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 54
  Waited count: 2221
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 49
  Waited count: 2237
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 53
  Waited count: 2235
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 60
  Waited count: 2228
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 2240
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 231
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 93
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 17
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1072063765-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1072063765-88-acceptor-0@4c0c4250-ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1072063765-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1072063765-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-5672d73d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@47b57b32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 920
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44671):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 47
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 92
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 330
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a781124
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1286
  Waited count: 1441
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ab9638):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 462
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 461
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 113 (IPC Client (30462390) connection to localhost/127.0.0.1:41407 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1416
  Waited count: 1417
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2021
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp440766313-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp440766313-122-acceptor-0@1311a7c5-ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp440766313-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp440766313-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-16c500e0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1738d90b):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 919
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 130 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 131 (IPC Server idle connection scanner for port 37189):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 47
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 133 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 92
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 318
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b92a9d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 138 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1275
  Waited count: 1430
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 139 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@28ee108f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 132 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 129 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 140 (IPC Server handler 0 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 1 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 2 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 3 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 144 (IPC Server handler 4 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 460
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 5
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 152 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 158 (pool-39-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (qtp1849637494-161):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (qtp1849637494-162-acceptor-0@63906456-ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (qtp1849637494-163):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 165 (qtp1849637494-165):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 166 (Session-HouseKeeper-27c68f4d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 189 (pool-23-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 190 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 193 (java.util.concurrent.ThreadPoolExecutor$Worker@56ec85f[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@324b4eae[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3862465c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 918
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 200 (IPC Server idle connection scanner for port 34801):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 47
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 202 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 92
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 314
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16ba07ee
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 206 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1272
  Waited count: 1438
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (pool-46-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1ec78693):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 459
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 459
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 459
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 459
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 459
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3c782deb[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 16
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55878):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 46
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 230
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 33
  Waited count: 735
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7892c0b7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:55878):):
  State: WAITING
  Blocked count: 0
  Waited count: 835
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6699e2fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 881
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21932026
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33adabda
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 324
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:55878)):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 57
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22a0a024
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 37
  Waited count: 88
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 2
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 2
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@35b2967
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 293
  Waited count: 1068
  Waiting on java.util.concurrent.Semaphore$NonfairSync@48909076
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 11
  Waited count: 83
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6f96b8c0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 76
  Waited count: 6057
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75bfaa0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@114ed3d9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4b442988
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@139244ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@41889df6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 332 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 73
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;428ded7e54d6:46337):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 2765
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$959/0x00007eff88ef0480.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569)
Thread 355 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 46
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 359 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (org.apache.hadoop.hdfs.PeerCache@5df40f7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 153
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 380 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4553
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 397 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 79
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 398 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 90
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 68
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 421 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 46
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 409 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 45469
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 430 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 28
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 431 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 454 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 9
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@313bc781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 474 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 12
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24c98781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 476 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 27
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2655cf2a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 484 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 16
  Waited count: 33
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@619c3790
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 529 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 556 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 45224
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 569 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 591 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 592 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 593 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 595 (ForkJoinPool.commonPool-worker-2):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 603
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 1015 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 428
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1076 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 11
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1104 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1115 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 61
  Waited count: 93
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1173 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1174 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1531 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@47af5010
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1854 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1913 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1914 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2824 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9175 (AsyncFSWAL-1-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dad80b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9179 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 15
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-08T04:34:22,006 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
2024-12-08T04:34:52,006 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;428ded7e54d6:46337
218 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 18
  Waited count: 14
  Waiting on java.lang.ref.ReferenceQueue$Lock@6bbd803
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 15
  Waited count: 21
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 29
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 26
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ff45b9e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5245
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 53
  Waiting on java.util.concurrent.CountDownLatch$Sync@55e51d58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12117
  Waited count: 12669
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 10
  Waited count: 11
  Waiting on java.lang.ref.ReferenceQueue$Lock@3056cb7e
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@4d034e41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1043
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 105
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp2988114-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp2988114-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp2988114-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp2988114-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2988114-41-acceptor-0@2a09dac-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2988114-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2988114-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2988114-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-4898edba-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 27
  Waited count: 3058
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef7501
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41407):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 54
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 105
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 174
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 105
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 174
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 51530
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 4
  Waited count: 1513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2821ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 54
  Waited count: 2281
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 49
  Waited count: 2298
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 53
  Waited count: 2296
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 60
  Waited count: 2289
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 2300
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 261
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 105
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 19
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1072063765-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1072063765-88-acceptor-0@4c0c4250-ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1072063765-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1072063765-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-5672d73d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@47b57b32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1040
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44671):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 53
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 104
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 350
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a781124
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1306
  Waited count: 1481
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ab9638):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 522
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 521
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 113 (IPC Client (30462390) connection to localhost/127.0.0.1:41407 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1476
  Waited count: 1477
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2081
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp440766313-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp440766313-122-acceptor-0@1311a7c5-ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp440766313-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp440766313-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-16c500e0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1738d90b):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1039
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 130 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 131 (IPC Server idle connection scanner for port 37189):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 53
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 133 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 104
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 338
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b92a9d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 138 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1295
  Waited count: 1470
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 139 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@28ee108f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 132 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 129 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 140 (IPC Server handler 0 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 1 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 2 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 3 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 144 (IPC Server handler 4 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 520
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 5
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 152 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 158 (pool-39-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (qtp1849637494-161):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (qtp1849637494-162-acceptor-0@63906456-ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (qtp1849637494-163):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 165 (qtp1849637494-165):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 166 (Session-HouseKeeper-27c68f4d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 189 (pool-23-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 190 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 193 (java.util.concurrent.ThreadPoolExecutor$Worker@56ec85f[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@324b4eae[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3862465c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1038
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 200 (IPC Server idle connection scanner for port 34801):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 53
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 202 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 104
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 334
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16ba07ee
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 206 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1292
  Waited count: 1478
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (pool-46-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1ec78693):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 519
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 519
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 519
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 519
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 519
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3c782deb[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 18
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55878):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 52
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 260
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 33
  Waited count: 740
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7892c0b7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:55878):):
  State: WAITING
  Blocked count: 0
  Waited count: 840
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6699e2fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 886
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21932026
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33adabda
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 352
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:55878)):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 57
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22a0a024
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 135
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 37
  Waited count: 88
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 2
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 2
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 3
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@35b2967
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 293
  Waited count: 1068
  Waiting on java.util.concurrent.Semaphore$NonfairSync@48909076
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 11
  Waited count: 83
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6f96b8c0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 76
  Waited count: 6057
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75bfaa0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@114ed3d9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4b442988
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@139244ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@41889df6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 332 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 73
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;428ded7e54d6:46337):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 2765
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$959/0x00007eff88ef0480.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569)
Thread 355 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 52
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 359 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (org.apache.hadoop.hdfs.PeerCache@5df40f7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 173
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 380 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5153
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 397 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 79
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 398 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 90
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 68
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 421 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 52
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 409 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 51470
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 430 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 28
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 431 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 454 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 9
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@313bc781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 474 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 12
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24c98781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 476 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 27
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2655cf2a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 484 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 16
  Waited count: 33
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@619c3790
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 529 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 556 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 51226
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 569 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 591 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 592 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 593 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1015 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 434
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1076 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 11
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1104 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1115 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 61
  Waited count: 93
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1173 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1174 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1531 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@47af5010
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1854 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1913 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1914 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2824 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9175 (AsyncFSWAL-1-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dad80b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9179 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 21
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-08T04:35:22,006 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T04:35:52,006 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-08T04:36:00,283 DEBUG [M:0;428ded7e54d6:46337 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:

2024-12-08T04:36:00,283 WARN  [M:0;428ded7e54d6:46337 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
	... 20 more
2024-12-08T04:36:00,284 WARN  [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, trying to recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:36:00,286 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-08T04:36:00,286 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-08T04:36:00,286 INFO  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748
2024-12-08T04:36:00,287 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748 after 1ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:36:00,287 WARN  [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): closing the old writer failed.
java.io.InterruptedIOException: Operation cancelled
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:36:00,287 INFO  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748
2024-12-08T04:36:00,287 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748 after 0ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;428ded7e54d6:46337
221 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 18
  Waited count: 14
  Waiting on java.lang.ref.ReferenceQueue$Lock@6bbd803
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 15
  Waited count: 22
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 32
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 29
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ff45b9e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5845
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 59
  Waiting on java.util.concurrent.CountDownLatch$Sync@5093297e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 12117
  Waited count: 12670
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 10
  Waited count: 11
  Waiting on java.lang.ref.ReferenceQueue$Lock@3056cb7e
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@4d034e41
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1163
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 117
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp2988114-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp2988114-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp2988114-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp2988114-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2988114-41-acceptor-0@2a09dac-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:40303}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2988114-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2988114-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2988114-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-4898edba-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 27
  Waited count: 3058
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef7501
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41407):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 60
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 117
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 194
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 117
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 194
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 57494
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 4
  Waited count: 1513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2821ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 54
  Waited count: 2342
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 49
  Waited count: 2359
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 53
  Waited count: 2357
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 60
  Waited count: 2350
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41407):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 2361
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 291
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 117
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 21
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1072063765-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1072063765-88-acceptor-0@4c0c4250-ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:45189}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1072063765-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1072063765-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-5672d73d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@47b57b32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1160
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44671):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 59
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 117
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 370
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a781124
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1326
  Waited count: 1521
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ab9638):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 582
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 581
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44671):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 113 (IPC Client (30462390) connection to localhost/127.0.0.1:41407 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1536
  Waited count: 1537
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2141
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp440766313-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp440766313-122-acceptor-0@1311a7c5-ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:42377}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp440766313-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp440766313-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-16c500e0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1738d90b):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1159
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 130 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 131 (IPC Server idle connection scanner for port 37189):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 59
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 133 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 116
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 358
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b92a9d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 138 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1315
  Waited count: 1510
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 139 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@28ee108f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 132 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 129 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 140 (IPC Server handler 0 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 1 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 2 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 3 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 144 (IPC Server handler 4 on default port 37189):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 580
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 5
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 152 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 9
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 158 (pool-39-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (qtp1849637494-161):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff88428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (qtp1849637494-162-acceptor-0@63906456-ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:40445}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (qtp1849637494-163):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 165 (qtp1849637494-165):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 166 (Session-HouseKeeper-27c68f4d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 169 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 3
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 189 (pool-23-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 190 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 193 (java.util.concurrent.ThreadPoolExecutor$Worker@56ec85f[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@324b4eae[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3862465c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1158
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 200 (IPC Server idle connection scanner for port 34801):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 59
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 202 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 116
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 354
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16ba07ee
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 206 (BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407):
  State: TIMED_WAITING
  Blocked count: 1312
  Waited count: 1518
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (pool-46-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1ec78693):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 34801):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3c782deb[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 20
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55878):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 58
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 290
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 33
  Waited count: 744
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7892c0b7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:55878):):
  State: WAITING
  Blocked count: 0
  Waited count: 844
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6699e2fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 890
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21932026
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33adabda
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 380
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:55878)):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 57
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22a0a024
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 135
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 37
  Waited count: 88
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 1
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 2
  Waited count: 135
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 2
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 2
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 3
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 134
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d545fbf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@35b2967
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 293
  Waited count: 1068
  Waiting on java.util.concurrent.Semaphore$NonfairSync@48909076
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 11
  Waited count: 83
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6f96b8c0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 76
  Waited count: 6057
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@75bfaa0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d91eb3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@114ed3d9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4b442988
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@139244ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46337):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@41889df6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 332 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 73
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;428ded7e54d6:46337):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 2766
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011)
    app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184)
    app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272)
    app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140)
    app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206)
    app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758)
    app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285)
    app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 355 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 58
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 359 (master/428ded7e54d6:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (org.apache.hadoop.hdfs.PeerCache@5df40f7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 193
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 380 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5753
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 397 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 79
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 398 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 90
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 410 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 68
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 421 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 409 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 57472
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 430 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 28
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 431 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 29
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 454 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 9
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@313bc781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 474 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 12
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24c98781
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 476 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 27
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2655cf2a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 484 (regionserver/428ded7e54d6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 16
  Waited count: 33
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@619c3790
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 529 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 556 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 57227
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 569 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 591 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 592 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 593 (region-location-3):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1015 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 440
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1076 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 11
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1104 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1115 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 61
  Waited count: 93
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1173 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1174 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1531 (Container metrics unregistration):
  State: WAITING
  Blocked count: 10
  Waited count: 33
  Waiting on java.util.TaskQueue@47af5010
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1854 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1913 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1914 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2824 (region-location-4):
  State: WAITING
  Blocked count: 1
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ef4f3ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9175 (AsyncFSWAL-1-hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData-prefix:428ded7e54d6,46337,1733631983069):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dad80b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9179 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 27
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 9180 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9184 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9185 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1130/0x00007eff89120430.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-08T04:36:04,288 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748 after 4001ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T04:36:05,285 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-08T04:36:05,285 INFO  [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-08T04:36:05,285 INFO  [M:0;428ded7e54d6:46337 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-08T04:36:05,285 INFO  [M:0;428ded7e54d6:46337 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46337
2024-12-08T04:36:05,287 DEBUG [M:0;428ded7e54d6:46337 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/428ded7e54d6,46337,1733631983069 already deleted, retry=false
2024-12-08T04:36:05,290 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41407/user/jenkins/test-data/c417a2c8-fab4-ecbb-d5cd-c67ec3aa8720/MasterData/WALs/428ded7e54d6,46337,1733631983069/428ded7e54d6%2C46337%2C1733631983069.1733631984748
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 12 more
2024-12-08T04:36:05,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:36:05,389 INFO  [M:0;428ded7e54d6:46337 {}] regionserver.HRegionServer(1307): Exiting; stopping=428ded7e54d6,46337,1733631983069; zookeeper connection closed.
2024-12-08T04:36:05,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46337-0x1006fe072e80000, quorum=127.0.0.1:55878, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-08T04:36:05,393 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3003ef5d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:36:05,393 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@524316e2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:36:05,393 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:36:05,393 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e1b48b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T04:36:05,393 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e5b9a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:36:05,395 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-08T04:36:05,395 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-08T04:36:05,395 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-08T04:36:05,395 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-889361663-172.17.0.2-1733631978419 (Datanode Uuid 7b7eb7da-b69b-41a3-a625-fd862532a332) service to localhost/127.0.0.1:41407
2024-12-08T04:36:05,396 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data5/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,397 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data6/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,397 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-08T04:36:05,399 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a29fbf5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:36:05,399 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53bdbdf4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:36:05,399 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:36:05,400 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45f72ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T04:36:05,400 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ca832e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:36:05,401 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-08T04:36:05,401 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-08T04:36:05,401 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-08T04:36:05,401 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-889361663-172.17.0.2-1733631978419 (Datanode Uuid 02ad592b-645e-4d06-8605-8493f90ebc27) service to localhost/127.0.0.1:41407
2024-12-08T04:36:05,402 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data3/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,402 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data4/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,402 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-08T04:36:05,404 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@420d534c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-08T04:36:05,404 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16165456{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:36:05,405 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:36:05,405 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7622634b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T04:36:05,405 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31a0decf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:36:05,406 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-08T04:36:05,406 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-08T04:36:05,406 WARN  [BP-889361663-172.17.0.2-1733631978419 heartbeating to localhost/127.0.0.1:41407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-889361663-172.17.0.2-1733631978419 (Datanode Uuid 5630b6cd-499b-4a4d-a30a-5d4649b5feb4) service to localhost/127.0.0.1:41407
2024-12-08T04:36:05,406 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-08T04:36:05,407 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data1/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,407 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/cluster_9480fdc3-dd5e-8540-4a65-5e2e0b546283/dfs/data/data2/current/BP-889361663-172.17.0.2-1733631978419 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-08T04:36:05,407 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-08T04:36:05,414 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7883a2cb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-08T04:36:05,415 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-08T04:36:05,415 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-08T04:36:05,415 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-08T04:36:05,415 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/8fdb2168-5acb-1166-6327-5557208bd2f3/hadoop.log.dir/,STOPPED}
2024-12-08T04:36:05,429 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-08T04:36:05,700 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
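The final lines record HBaseTestingUtility tearing down the minicluster (MiniZK, the datanodes, and the HDFS namenode web contexts stopped above). A minimal sketch of the lifecycle that produces this kind of teardown logging is shown below; the wrapper class name MiniClusterLifecycle is a hypothetical example, while HBaseTestingUtility, startMiniCluster() and shutdownMiniCluster() are the real test-utility APIs referenced in the log.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterLifecycle {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Brings up an embedded ZooKeeper, MiniDFS and HBase master/regionserver.
        util.startMiniCluster();
        try {
          // Test body would run against util.getConnection() here.
        } finally {
          // Produces the "Shutdown MiniZK cluster" / "Minicluster is down"
          // messages seen at the end of this log.
          util.shutdownMiniCluster();
        }
      }
    }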